master
ColdWindScholar 2023-09-29 15:59:14 +08:00
parent e762b344d6
commit e6d3a598b5
14 changed files with 8441 additions and 0 deletions

1227
blockimgdiff.py Normal file

File diff suppressed because it is too large

1720
common.py Normal file

File diff suppressed because it is too large

88
contextpatch.py Normal file

@@ -0,0 +1,88 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from re import sub
fix_permission = {"/vendor/bin/hw/android.hardware.wifi@1.0": ["u:object_r:hal_wifi_default_exec:s0"]}  # stored as a list, like every other context value, so the writer can join it
def scan_context(file) -> dict: # Read a file_contexts file into a dict of {path: [context, ...]}
context = {}
with open(file, "r", encoding='utf-8') as file_:
for i in file_.readlines():
filepath, *other = i.strip().replace('\\', '').split()
context[filepath] = other
if len(other) > 1:
print(f"[Warn] {i[0]} has too much data.")
return context
def scan_dir(folder) -> list: # Walk the unpacked directory and return an ordered list of partition-relative paths
part_name = os.path.basename(folder)
allfiles = ['/', '/lost+found', f'/{part_name}/lost+found', f'/{part_name}', f'/{part_name}/']
for root, dirs, files in os.walk(folder, topdown=True):
for dir_ in dirs:
if os.name == 'nt':
allfiles.append(os.path.join(root, dir_).replace(folder, '/' + part_name).replace('\\', '/'))
elif os.name == 'posix':
allfiles.append(os.path.join(root, dir_).replace(folder, '/' + part_name))
for file in files:
if os.name == 'nt':
allfiles.append(os.path.join(root, file).replace(folder, '/' + part_name).replace('\\', '/'))
elif os.name == 'posix':
allfiles.append(os.path.join(root, file).replace(folder, '/' + part_name))
return sorted(set(allfiles), key=allfiles.index)
def context_patch(fs_file, filename) -> dict: # Compare the original contexts against the scanned paths and add missing entries
new_fs = {}
r_new_fs = {}
permission_d = None
try:
permission_d = fs_file.get(list(fs_file)[5])
except IndexError:
pass
if not permission_d:
permission_d = ['u:object_r:system_file:s0']
for i in filename:
if fs_file.get(i):
new_fs[sub(r'([^-_/a-zA-Z0-9])', r'\\\1', i)] = fs_file[i]
else:
permission = permission_d
if i:
if i in fix_permission.keys():
permission = fix_permission[i]
else:
d_arg = True
for e in fs_file.keys():
if (path := os.path.dirname(i)) in e:
if e == path and e[-1:] == '/':
continue
permission = fs_file[e]
d_arg = False
break
if d_arg:
for i_ in r_new_fs.keys():
if (path := os.path.dirname(i)) in i_:
if i_ == path and i_[-1:] == '/':
continue
permission = r_new_fs[i_]
break
print(f"ADD [{i} {permission}]")
r_new_fs[i] = permission
new_fs[sub(r'([^-_/a-zA-Z0-9])', r'\\\1', i)] = permission
return new_fs
def main(dir_path, fs_config) -> None:
origin = scan_context(os.path.abspath(fs_config))
allfiles = scan_dir(os.path.abspath(dir_path))
new_fs = context_patch(origin, allfiles)
with open(fs_config, "w+", encoding='utf-8', newline='\n') as f:
f.writelines([i + " " + " ".join(new_fs[i]) + "\n" for i in sorted(new_fs.keys())])
print("Load origin %d" % (len(origin.keys())) + " entries")
print("Detect total %d" % (len(allfiles)) + " entries")
print('Add %d' % (len(new_fs.keys()) - len(origin.keys())) + " entries")
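For reference, a minimal usage sketch of contextpatch.py; the unpacked-partition and file_contexts paths below are hypothetical examples, not part of this commit:

import contextpatch
# Re-generate the file_contexts dump after files were added under ./system.
contextpatch.main("./system", "./config/system_file_contexts")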

979
ext4.py Normal file

@@ -0,0 +1,979 @@
import ctypes
import functools
import io
import math
import queue
def wcscmp(str_a, str_b):
for a, b in zip(str_a, str_b):
tmp = ord(a) - ord(b)
if tmp != 0:
return -1 if tmp < 0 else 1
tmp = len(str_a) - len(str_b)
return -1 if tmp < 0 else 1 if tmp > 0 else 0
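wcscmp above mirrors C's wide-string compare: ordinal comparison character by character, with string length as the tie-breaker. A few illustrative results:

assert wcscmp("a", "b") == -1       # 'a' precedes 'b'
assert wcscmp("abc", "ab") == 1     # equal prefix, longer string sorts last
assert wcscmp("same", "same") == 0  # identical strings compare equal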
class Ext4Error(Exception):
pass
class BlockMapError(Ext4Error):
pass
class EndOfStreamError(Ext4Error):
pass
class MagicError(Ext4Error):
pass
# ----------------------------- LOW LEVEL ------------------------------
class ext4_struct(ctypes.LittleEndianStructure):
def __getattr__(self, name):
try:
# Combining *_lo and *_hi fields
lo_field = ctypes.LittleEndianStructure.__getattribute__(type(self), name + "_lo")
size = lo_field.size
lo = lo_field.__get__(self)
hi = ctypes.LittleEndianStructure.__getattribute__(self, name + "_hi")
return (hi << (8 * size)) | lo
except AttributeError:
return ctypes.LittleEndianStructure.__getattribute__(self, name)
def __setattr__(self, name, value):
try:
# Combining *_lo and *_hi fields
lo_field = ctypes.LittleEndianStructure.__getattribute__(type(self), name + "_lo")
size = lo_field.size
lo_field.__set__(self, value & ((1 << (8 * size)) - 1))
ctypes.LittleEndianStructure.__setattr__(self, name + "_hi", value >> (8 * size))
except AttributeError:
ctypes.LittleEndianStructure.__setattr__(self, name, value)
class ext4_dir_entry_2(ext4_struct):
_fields_ = [
("inode", ctypes.c_uint), # 0x0
("rec_len", ctypes.c_ushort), # 0x4
("name_len", ctypes.c_ubyte), # 0x6
("file_type", ctypes.c_ubyte) # 0x7
# Variable length field "name" missing at 0x8
]
def _from_buffer_copy(raw, offset=0, platform64=True):
struct = ext4_dir_entry_2.from_buffer_copy(raw, offset)
struct.name = raw[offset + 0x8: offset + 0x8 + struct.name_len]
return struct
class ext4_extent(ext4_struct):
_fields_ = [
("ee_block", ctypes.c_uint), # 0x0000
("ee_len", ctypes.c_ushort), # 0x0004
("ee_start_hi", ctypes.c_ushort), # 0x0006
("ee_start_lo", ctypes.c_uint) # 0x0008
]
class ext4_extent_header(ext4_struct):
_fields_ = [
("eh_magic", ctypes.c_ushort), # 0x0000, Must be 0xF30A
("eh_entries", ctypes.c_ushort), # 0x0002
("eh_max", ctypes.c_ushort), # 0x0004
("eh_depth", ctypes.c_ushort), # 0x0006
("eh_generation", ctypes.c_uint) # 0x0008
]
class ext4_extent_idx(ext4_struct):
_fields_ = [
("ei_block", ctypes.c_uint), # 0x0000
("ei_leaf_lo", ctypes.c_uint), # 0x0004
("ei_leaf_hi", ctypes.c_ushort), # 0x0008
("ei_unused", ctypes.c_ushort) # 0x000A
]
class ext4_group_descriptor(ext4_struct):
_fields_ = [
("bg_block_bitmap_lo", ctypes.c_uint), # 0x0000
("bg_inode_bitmap_lo", ctypes.c_uint), # 0x0004
("bg_inode_table_lo", ctypes.c_uint), # 0x0008
("bg_free_blocks_count_lo", ctypes.c_ushort), # 0x000C
("bg_free_inodes_count_lo", ctypes.c_ushort), # 0x000E
("bg_used_dirs_count_lo", ctypes.c_ushort), # 0x0010
("bg_flags", ctypes.c_ushort), # 0x0012
("bg_exclude_bitmap_lo", ctypes.c_uint), # 0x0014
("bg_block_bitmap_csum_lo", ctypes.c_ushort), # 0x0018
("bg_inode_bitmap_csum_lo", ctypes.c_ushort), # 0x001A
("bg_itable_unused_lo", ctypes.c_ushort), # 0x001C
("bg_checksum", ctypes.c_ushort), # 0x001E
# 64-bit fields
("bg_block_bitmap_hi", ctypes.c_uint), # 0x0020
("bg_inode_bitmap_hi", ctypes.c_uint), # 0x0024
("bg_inode_table_hi", ctypes.c_uint), # 0x0028
("bg_free_blocks_count_hi", ctypes.c_ushort), # 0x002C
("bg_free_inodes_count_hi", ctypes.c_ushort), # 0x002E
("bg_used_dirs_count_hi", ctypes.c_ushort), # 0x0030
("bg_itable_unused_hi", ctypes.c_ushort), # 0x0032
("bg_exclude_bitmap_hi", ctypes.c_uint), # 0x0034
("bg_block_bitmap_csum_hi", ctypes.c_ushort), # 0x0038
("bg_inode_bitmap_csum_hi", ctypes.c_ushort), # 0x003A
("bg_reserved", ctypes.c_uint), # 0x003C
]
@staticmethod
def _from_buffer_copy(raw, platform64=True):
struct = ext4_group_descriptor.from_buffer_copy(raw)
if not platform64:
struct.bg_block_bitmap_hi = 0
struct.bg_inode_bitmap_hi = 0
struct.bg_inode_table_hi = 0
struct.bg_free_blocks_count_hi = 0
struct.bg_free_inodes_count_hi = 0
struct.bg_used_dirs_count_hi = 0
struct.bg_itable_unused_hi = 0
struct.bg_exclude_bitmap_hi = 0
struct.bg_block_bitmap_csum_hi = 0
struct.bg_inode_bitmap_csum_hi = 0
struct.bg_reserved = 0
return struct
class ext4_inode(ext4_struct):
EXT2_GOOD_OLD_INODE_SIZE = 128
# Every field passing 128 bytes is "additional data", whose size is specified by i_extra_isize.
# i_mode
S_IXOTH = 0x1 # Others can execute
S_IWOTH = 0x2 # Others can write
S_IROTH = 0x4 # Others can read
S_IXGRP = 0x8 # Group can execute
S_IWGRP = 0x10 # Group can write
S_IRGRP = 0x20 # Group can read
S_IXUSR = 0x40 # Owner can execute
S_IWUSR = 0x80 # Owner can write
S_IRUSR = 0x100 # Owner can read
S_ISVTX = 0x200 # Sticky bit (only owner can delete)
S_ISGID = 0x400 # Set GID (execute with privileges of group owner of the file's group)
S_ISUID = 0x800 # Set UID (execute with privileges of the file's owner)
S_IFIFO = 0x1000 # FIFO device (named pipe)
S_IFCHR = 0x2000 # Character device (raw, unbuffered, aligned, direct access to hardware storage)
S_IFDIR = 0x4000 # Directory
S_IFBLK = 0x6000 # Block device (buffered, arbitrary access to storage)
S_IFREG = 0x8000 # Regular file
S_IFLNK = 0xA000 # Symbolic link
S_IFSOCK = 0xC000 # Socket
# i_flags
EXT4_INDEX_FL = 0x1000 # Uses hash trees
EXT4_EXTENTS_FL = 0x80000 # Uses extents
EXT4_EA_INODE_FL = 0x200000 # Inode stores large xattr
EXT4_INLINE_DATA_FL = 0x10000000 # Has inline data
_fields_ = [
("i_mode", ctypes.c_ushort), # 0x0000
("i_uid_lo", ctypes.c_ushort), # 0x0002, Originally named i_uid
("i_size_lo", ctypes.c_uint), # 0x0004
("i_atime", ctypes.c_uint), # 0x0008
("i_ctime", ctypes.c_uint), # 0x000C
("i_mtime", ctypes.c_uint), # 0x0010
("i_dtime", ctypes.c_uint), # 0x0014
("i_gid_lo", ctypes.c_ushort), # 0x0018, Originally named i_gid
("i_links_count", ctypes.c_ushort), # 0x001A
("i_blocks_lo", ctypes.c_uint), # 0x001C
("i_flags", ctypes.c_uint), # 0x0020
("osd1", ctypes.c_uint), # 0x0024
("i_block", ctypes.c_uint * 15), # 0x0028
("i_generation", ctypes.c_uint), # 0x0064
("i_file_acl_lo", ctypes.c_uint), # 0x0068
("i_size_hi", ctypes.c_uint), # 0x006C, Originally named i_size_high
("i_obso_faddr", ctypes.c_uint), # 0x0070
("i_osd2_blocks_high", ctypes.c_ushort), # 0x0074, Originally named i_osd2.linux2.l_i_blocks_high
("i_file_acl_hi", ctypes.c_ushort), # 0x0076, Originally named i_osd2.linux2.l_i_file_acl_high
("i_uid_hi", ctypes.c_ushort), # 0x0078, Originally named i_osd2.linux2.l_i_uid_high
("i_gid_hi", ctypes.c_ushort), # 0x007A, Originally named i_osd2.linux2.l_i_gid_high
("i_osd2_checksum_lo", ctypes.c_ushort), # 0x007C, Originally named i_osd2.linux2.l_i_checksum_lo
("i_osd2_reserved", ctypes.c_ushort), # 0x007E, Originally named i_osd2.linux2.l_i_reserved
("i_extra_isize", ctypes.c_ushort), # 0x0080
("i_checksum_hi", ctypes.c_ushort), # 0x0082
("i_ctime_extra", ctypes.c_uint), # 0x0084
("i_mtime_extra", ctypes.c_uint), # 0x0088
("i_atime_extra", ctypes.c_uint), # 0x008C
("i_crtime", ctypes.c_uint), # 0x0090
("i_crtime_extra", ctypes.c_uint), # 0x0094
("i_version_hi", ctypes.c_uint), # 0x0098
("i_projid", ctypes.c_uint), # 0x009C
]
class ext4_superblock(ext4_struct):
EXT2_DESC_SIZE = 0x20 # Default value for s_desc_size, if INCOMPAT_64BIT is not set (NEEDS CONFIRMATION)
EXT2_MIN_DESC_SIZE = 0x20
EXT2_MIN_DESC_SIZE_64BIT = 0x40
# s_feature_incompat
INCOMPAT_64BIT = 0x80 # Uses 64-bit features (e.g. *_hi structure fields in ext4_group_descriptor)
INCOMPAT_32BIT = 0x66
INCOMPAT_FILETYPE = 0x2 # Directory entries record file type (instead of inode flags)
_fields_ = [
("s_inodes_count", ctypes.c_uint), # 0x0000
("s_blocks_count_lo", ctypes.c_uint), # 0x0004
("s_r_blocks_count_lo", ctypes.c_uint), # 0x0008
("s_free_blocks_count_lo", ctypes.c_uint), # 0x000C
("s_free_inodes_count", ctypes.c_uint), # 0x0010
("s_first_data_block", ctypes.c_uint), # 0x0014
("s_log_block_size", ctypes.c_uint), # 0x0018
("s_log_cluster_size", ctypes.c_uint), # 0x001C
("s_blocks_per_group", ctypes.c_uint), # 0x0020
("s_clusters_per_group", ctypes.c_uint), # 0x0024
("s_inodes_per_group", ctypes.c_uint), # 0x0028
("s_mtime", ctypes.c_uint), # 0x002C
("s_wtime", ctypes.c_uint), # 0x0030
("s_mnt_count", ctypes.c_ushort), # 0x0034
("s_max_mnt_count", ctypes.c_ushort), # 0x0036
("s_magic", ctypes.c_ushort), # 0x0038, Must be 0xEF53
("s_state", ctypes.c_ushort), # 0x003A
("s_errors", ctypes.c_ushort), # 0x003C
("s_minor_rev_level", ctypes.c_ushort), # 0x003E
("s_lastcheck", ctypes.c_uint), # 0x0040
("s_checkinterval", ctypes.c_uint), # 0x0044
("s_creator_os", ctypes.c_uint), # 0x0048
("s_rev_level", ctypes.c_uint), # 0x004C
("s_def_resuid", ctypes.c_ushort), # 0x0050
("s_def_resgid", ctypes.c_ushort), # 0x0052
("s_first_ino", ctypes.c_uint), # 0x0054
("s_inode_size", ctypes.c_ushort), # 0x0058
("s_block_group_nr", ctypes.c_ushort), # 0x005A
("s_feature_compat", ctypes.c_uint), # 0x005C
("s_feature_incompat", ctypes.c_uint), # 0x0060
("s_feature_ro_compat", ctypes.c_uint), # 0x0064
("s_uuid", ctypes.c_ubyte * 16), # 0x0068
("s_volume_name", ctypes.c_char * 16), # 0x0078
("s_last_mounted", ctypes.c_char * 64), # 0x0088
("s_algorithm_usage_bitmap", ctypes.c_uint), # 0x00C8
("s_prealloc_blocks", ctypes.c_ubyte), # 0x00CC
("s_prealloc_dir_blocks", ctypes.c_ubyte), # 0x00CD
("s_reserved_gdt_blocks", ctypes.c_ushort), # 0x00CE
("s_journal_uuid", ctypes.c_ubyte * 16), # 0x00D0
("s_journal_inum", ctypes.c_uint), # 0x00E0
("s_journal_dev", ctypes.c_uint), # 0x00E4
("s_last_orphan", ctypes.c_uint), # 0x00E8
("s_hash_seed", ctypes.c_uint * 4), # 0x00EC
("s_def_hash_version", ctypes.c_ubyte), # 0x00FC
("s_jnl_backup_type", ctypes.c_ubyte), # 0x00FD
("s_desc_size", ctypes.c_ushort), # 0x00FE
("s_default_mount_opts", ctypes.c_uint), # 0x0100
("s_first_meta_bg", ctypes.c_uint), # 0x0104
("s_mkfs_time", ctypes.c_uint), # 0x0108
("s_jnl_blocks", ctypes.c_uint * 17), # 0x010C
# 64-bit fields
("s_blocks_count_hi", ctypes.c_uint), # 0x0150
("s_r_blocks_count_hi", ctypes.c_uint), # 0x0154
("s_free_blocks_count_hi", ctypes.c_uint), # 0x0158
("s_min_extra_isize", ctypes.c_ushort), # 0x015C
("s_want_extra_isize", ctypes.c_ushort), # 0x015E
("s_flags", ctypes.c_uint), # 0x0160
("s_raid_stride", ctypes.c_ushort), # 0x0164
("s_mmp_interval", ctypes.c_ushort), # 0x0166
("s_mmp_block", ctypes.c_ulonglong), # 0x0168
("s_raid_stripe_width", ctypes.c_uint), # 0x0170
("s_log_groups_per_flex", ctypes.c_ubyte), # 0x0174
("s_checksum_type", ctypes.c_ubyte), # 0x0175
("s_reserved_pad", ctypes.c_ushort), # 0x0176
("s_kbytes_written", ctypes.c_ulonglong), # 0x0178
("s_snapshot_inum", ctypes.c_uint), # 0x0180
("s_snapshot_id", ctypes.c_uint), # 0x0184
("s_snapshot_r_blocks_count", ctypes.c_ulonglong), # 0x0188
("s_snapshot_list", ctypes.c_uint), # 0x0190
("s_error_count", ctypes.c_uint), # 0x0194
("s_first_error_time", ctypes.c_uint), # 0x0198
("s_first_error_ino", ctypes.c_uint), # 0x019C
("s_first_error_block", ctypes.c_ulonglong), # 0x01A0
("s_first_error_func", ctypes.c_ubyte * 32), # 0x01A8
("s_first_error_line", ctypes.c_uint), # 0x01C8
("s_last_error_time", ctypes.c_uint), # 0x01CC
("s_last_error_ino", ctypes.c_uint), # 0x01D0
("s_last_error_line", ctypes.c_uint), # 0x01D4
("s_last_error_block", ctypes.c_ulonglong), # 0x01D8
("s_last_error_func", ctypes.c_ubyte * 32), # 0x01E0
("s_mount_opts", ctypes.c_ubyte * 64), # 0x0200
("s_usr_quota_inum", ctypes.c_uint), # 0x0240
("s_grp_quota_inum", ctypes.c_uint), # 0x0244
("s_overhead_blocks", ctypes.c_uint), # 0x0248
("s_backup_bgs", ctypes.c_uint * 2), # 0x024C
("s_encrypt_algos", ctypes.c_ubyte * 4), # 0x0254
("s_encrypt_pw_salt", ctypes.c_ubyte * 16), # 0x0258
("s_lpf_ino", ctypes.c_uint), # 0x0268
("s_prj_quota_inum", ctypes.c_uint), # 0x026C
("s_checksum_seed", ctypes.c_uint), # 0x0270
("s_reserved", ctypes.c_uint * 98), # 0x0274
("s_checksum", ctypes.c_uint) # 0x03FC
]
@staticmethod
def _from_buffer_copy(raw, platform64=True):
struct = ext4_superblock.from_buffer_copy(raw)
if not platform64:
struct.s_blocks_count_hi = 0
struct.s_r_blocks_count_hi = 0
struct.s_free_blocks_count_hi = 0
struct.s_min_extra_isize = 0
struct.s_want_extra_isize = 0
struct.s_flags = 0
struct.s_raid_stride = 0
struct.s_mmp_interval = 0
struct.s_mmp_block = 0
struct.s_raid_stripe_width = 0
struct.s_log_groups_per_flex = 0
struct.s_checksum_type = 0
struct.s_reserved_pad = 0
struct.s_kbytes_written = 0
struct.s_snapshot_inum = 0
struct.s_snapshot_id = 0
struct.s_snapshot_r_blocks_count = 0
struct.s_snapshot_list = 0
struct.s_error_count = 0
struct.s_first_error_time = 0
struct.s_first_error_ino = 0
struct.s_first_error_block = 0
struct.s_first_error_func = 0
struct.s_first_error_line = 0
struct.s_last_error_time = 0
struct.s_last_error_ino = 0
struct.s_last_error_line = 0
struct.s_last_error_block = 0
struct.s_last_error_func = 0
struct.s_mount_opts = 0
struct.s_usr_quota_inum = 0
struct.s_grp_quota_inum = 0
struct.s_overhead_blocks = 0
struct.s_backup_bgs = 0
struct.s_encrypt_algos = 0
struct.s_encrypt_pw_salt = 0
struct.s_lpf_ino = 0
struct.s_prj_quota_inum = 0
struct.s_checksum_seed = 0
struct.s_reserved = 0
struct.s_checksum = 0
# if (struct.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) == 0:
# struct.s_desc_size = ext4_superblock.EXT2_DESC_SIZE
if struct.s_desc_size == 0:
if (struct.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) == 0:
struct.s_desc_size = ext4_superblock.EXT2_MIN_DESC_SIZE
else:
struct.s_desc_size = ext4_superblock.EXT2_MIN_DESC_SIZE_64BIT
return struct
class ext4_xattr_entry(ext4_struct):
_fields_ = [
("e_name_len", ctypes.c_ubyte), # 0x00
("e_name_index", ctypes.c_ubyte), # 0x01
("e_value_offs", ctypes.c_ushort), # 0x02
("e_value_inum", ctypes.c_uint), # 0x04
("e_value_size", ctypes.c_uint), # 0x08
("e_hash", ctypes.c_uint) # 0x0C
# Variable length field "e_name" missing at 0x10
]
def _from_buffer_copy(raw, offset=0, platform64=True):
struct = ext4_xattr_entry.from_buffer_copy(raw, offset)
struct.e_name = raw[offset + 0x10: offset + 0x10 + struct.e_name_len]
return struct
@property
def _size(self): return 4 * ((ctypes.sizeof(type(self)) + self.e_name_len + 3) // 4) # 4-byte alignment
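As a worked example of the alignment above: with ctypes.sizeof(ext4_xattr_entry) == 16 and e_name_len == 10, _size is 4 * ((16 + 10 + 3) // 4) == 28, i.e. the 26 used bytes rounded up to the next 4-byte boundary.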
class ext4_xattr_header(ext4_struct):
_fields_ = [
("h_magic", ctypes.c_uint), # 0x0, Must be 0xEA020000
("h_refcount", ctypes.c_uint), # 0x4
("h_blocks", ctypes.c_uint), # 0x8
("h_hash", ctypes.c_uint), # 0xC
("h_checksum", ctypes.c_uint), # 0x10
("h_reserved", ctypes.c_uint * 3), # 0x14
]
class ext4_xattr_ibody_header(ext4_struct):
_fields_ = [
("h_magic", ctypes.c_uint) # 0x0, Must be 0xEA020000
]
class InodeType:
UNKNOWN = 0x0 # Unknown file type
FILE = 0x1 # Regular file
DIRECTORY = 0x2 # Directory
CHARACTER_DEVICE = 0x3 # Character device
BLOCK_DEVICE = 0x4 # Block device
FIFO = 0x5 # FIFO
SOCKET = 0x6 # Socket
SYMBOLIC_LINK = 0x7 # Symbolic link
CHECKSUM = 0xDE # Checksum entry; not really a file type, but a type of directory entry
# ----------------------------- HIGH LEVEL ------------------------------
class MappingEntry:
def __init__(self, file_block_idx, disk_block_idx, block_count=1):
self.file_block_idx = file_block_idx
self.disk_block_idx = disk_block_idx
self.block_count = block_count
def __iter__(self):
yield self.file_block_idx
yield self.disk_block_idx
yield self.block_count
def __repr__(self):
return "{type:s}({file_block_idx!r:s}, {disk_block_idx!r:s}, {blocK_count!r:s})".format(
blocK_count=self.block_count,
disk_block_idx=self.disk_block_idx,
file_block_idx=self.file_block_idx,
type=type(self).__name__
)
def copy(self):
return MappingEntry(self.file_block_idx, self.disk_block_idx, self.block_count)
def create_mapping(*entries):
file_block_idx = 0
result = [None] * len(entries)
for i, entry in enumerate(entries):
disk_block_idx, block_count = entry
result[i] = MappingEntry(file_block_idx, disk_block_idx, block_count)
file_block_idx += block_count
return result
def optimize(entries):
entries.sort(key=lambda entry: entry.file_block_idx)
idx = 0
while idx < len(entries):
while idx + 1 < len(entries) \
and entries[idx].file_block_idx + entries[idx].block_count == entries[idx + 1].file_block_idx \
and entries[idx].disk_block_idx + entries[idx].block_count == entries[idx + 1].disk_block_idx:
tmp = entries.pop(idx + 1)
entries[idx].block_count += tmp.block_count
idx += 1
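A small sketch of how create_mapping and optimize cooperate; the block numbers are invented for illustration. Two physically adjacent runs collapse into a single entry:

# Hypothetical extents: 3 blocks at disk block 100, then 2 blocks at disk block 103.
mapping = MappingEntry.create_mapping((100, 3), (103, 2))
MappingEntry.optimize(mapping)
# mapping is now [MappingEntry(0, 100, 5)] - one contiguous 5-block run.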
class Volume:
ROOT_INODE = 2
def __init__(self, stream, offset=0, ignore_flags=False, ignore_magic=False):
self.ignore_flags = ignore_flags
self.ignore_magic = ignore_magic
self.offset = offset
self.platform64 = True # Initial value needed for Volume.read_struct
self.stream = stream
# Superblock
self.superblock = self.read_struct(ext4_superblock, 0x400)
self.platform64 = (self.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_64BIT) != 0
if not ignore_magic and self.superblock.s_magic != 0xEF53:
raise MagicError("Invalid magic value in superblock: 0x{magic:04X} (expected 0xEF53)".format(
magic=self.superblock.s_magic))
# Group descriptors
self.group_descriptors = [None] * (self.superblock.s_inodes_count // self.superblock.s_inodes_per_group)
group_desc_table_offset = (0x400 // self.block_size + 1) * self.block_size # First block after superblock
for group_desc_idx in range(len(self.group_descriptors)):
group_desc_offset = group_desc_table_offset + group_desc_idx * self.superblock.s_desc_size
self.group_descriptors[group_desc_idx] = self.read_struct(ext4_group_descriptor, group_desc_offset)
def __repr__(self):
return "{type_name:s}(volume_name = {volume_name!r:s}, uuid = {uuid!r:s}, last_mounted = {last_mounted!r:s})".format(
last_mounted=self.superblock.s_last_mounted,
type_name=type(self).__name__,
uuid=self.uuid,
volume_name=self.superblock.s_volume_name
)
@property
def block_size(self):
return 1 << (10 + self.superblock.s_log_block_size)
def get_inode(self, inode_idx, file_type=InodeType.UNKNOWN):
group_idx, inode_table_entry_idx = self.get_inode_group(inode_idx)
inode_table_offset = self.group_descriptors[group_idx].bg_inode_table * self.block_size
inode_offset = inode_table_offset + inode_table_entry_idx * self.superblock.s_inode_size
return Inode(self, inode_offset, inode_idx, file_type)
def get_inode_group(self, inode_idx):
group_idx = (inode_idx - 1) // self.superblock.s_inodes_per_group
inode_table_entry_idx = (inode_idx - 1) % self.superblock.s_inodes_per_group
return group_idx, inode_table_entry_idx
def read(self, offset, byte_len):
if self.offset + offset != self.stream.tell():
self.stream.seek(self.offset + offset, io.SEEK_SET)
return self.stream.read(byte_len)
def read_struct(self, structure, offset, platform64=None):
raw = self.read(offset, ctypes.sizeof(structure))
if hasattr(structure, "_from_buffer_copy"):
return structure._from_buffer_copy(raw, platform64=platform64 if platform64 else self.platform64)
else:
return structure.from_buffer_copy(raw)
@property
def root(self):
return self.get_inode(Volume.ROOT_INODE, InodeType.DIRECTORY)
@property
def uuid(self):
uuid = self.superblock.s_uuid
uuid = [uuid[:4], uuid[4: 6], uuid[6: 8], uuid[8: 10], uuid[10:]]
return "-".join("".join("{0:02X}".format(c) for c in part) for part in uuid)
class Inode:
def __init__(self, volume, offset, inode_idx, file_type=InodeType.UNKNOWN):
self.inode_idx = inode_idx
self.offset = offset
self.volume = volume
self.file_type = file_type
self.inode = volume.read_struct(ext4_inode, offset)
def __len__(self):
return self.inode.i_size
def __repr__(self):
if self.inode_idx is not None:
return "{type_name:s}(inode_idx = {inode!r:s}, offset = 0x{offset:X}, volume_uuid = {uuid!r:s})".format(
inode=self.inode_idx,
offset=self.offset,
type_name=type(self).__name__,
uuid=self.volume.uuid
)
else:
return "{type_name:s}(offset = 0x{offset:X}, volume_uuid = {uuid!r:s})".format(
offset=self.offset,
type_name=type(self).__name__,
uuid=self.volume.uuid
)
def _parse_xattrs(self, raw_data, offset, prefix_override: dict = None):
prefixes = {
0: "",
1: "user.",
2: "system.posix_acl_access",
3: "system.posix_acl_default",
4: "trusted.",
6: "security.",
7: "system.",
8: "system.richacl"
}
if prefix_override:
prefixes.update(prefix_override)
# Iterator over ext4_xattr_entry structures
i = 0
while i < len(raw_data):
xattr_entry = ext4_xattr_entry._from_buffer_copy(raw_data, i, platform64=self.volume.platform64)
if (
xattr_entry.e_name_len | xattr_entry.e_name_index | xattr_entry.e_value_offs | xattr_entry.e_value_inum) == 0:
# End of ext4_xattr_entry list
break
if xattr_entry.e_name_index not in prefixes:
raise Ext4Error("Unknown attribute prefix {prefix:d} in inode {inode:d}".format(
inode=self.inode_idx,
prefix=xattr_entry.e_name_index
))
xattr_name = prefixes[xattr_entry.e_name_index] + xattr_entry.e_name.decode("iso-8859-2")
if xattr_entry.e_value_inum != 0:
# external xattr
xattr_inode = self.volume.get_inode(xattr_entry.e_value_inum, InodeType.FILE)
if not self.volume.ignore_flags and (xattr_inode.inode.i_flags & ext4_inode.EXT4_EA_INODE_FL) == 0:
raise Ext4Error(
"Inode {value_indoe:d} associated with the extended attribute {xattr_name!r:s} of inode {inode:d} is not marked as large extended attribute value.".format(
inode=self.inode_idx,
value_inode=xattr_inode.inode_idx,
xattr_name=xattr_name
))
# TODO Use xattr_entry.e_value_size or xattr_inode.inode.i_size?
xattr_value = xattr_inode.open_read().read()
else:
# internal xattr
xattr_value = raw_data[
xattr_entry.e_value_offs + offset: xattr_entry.e_value_offs + offset + xattr_entry.e_value_size]
yield xattr_name, xattr_value
i += xattr_entry._size
@staticmethod
def directory_entry_comparator(dir_a, dir_b):
file_name_a, _, file_type_a = dir_a
file_name_b, _, file_type_b = dir_b
if file_type_a == InodeType.DIRECTORY == file_type_b or file_type_a != InodeType.DIRECTORY != file_type_b:
tmp = wcscmp(file_name_a.lower(), file_name_b.lower())
return tmp if tmp != 0 else wcscmp(file_name_a, file_name_b)
else:
return -1 if file_type_a == InodeType.DIRECTORY else 1
directory_entry_key = functools.cmp_to_key(directory_entry_comparator)
def get_inode(self, *relative_path, decode_name=None):
if not self.is_dir:
raise Ext4Error("Inode {inode:d} is not a directory.".format(inode=self.inode_idx))
current_inode = self
for i, part in enumerate(relative_path):
if not self.volume.ignore_flags and not current_inode.is_dir:
current_path = "/".join(relative_path[:i])
raise Ext4Error("{current_path!r:s} (Inode {inode:d}) is not a directory.".format(
current_path=current_path,
inode=current_inode.inode_idx
))
file_name, inode_idx, file_type = next(
filter(lambda entry: entry[0] == part, current_inode.open_dir(decode_name)), (None, None, None))
if inode_idx is None:
current_path = "/".join(relative_path[:i])
raise FileNotFoundError("{part!r:s} not found in {current_path!r:s} (Inode {inode:d}).".format(
current_path=current_path,
inode=current_inode.inode_idx,
part=part
))
current_inode = current_inode.volume.get_inode(inode_idx, file_type)
return current_inode
@property
def is_dir(self):
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
return (self.inode.i_mode & ext4_inode.S_IFDIR) != 0
else:
return self.file_type == InodeType.DIRECTORY
@property
def is_file(self):
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
return (self.inode.i_mode & ext4_inode.S_IFREG) != 0
else:
return self.file_type == InodeType.FILE
@property
def is_symlink(self):
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
return (self.inode.i_mode & ext4_inode.S_IFLNK) != 0
else:
return self.file_type == InodeType.SYMBOLIC_LINK
@property
def is_in_use(self):
group_idx, bitmap_bit = self.volume.get_inode_group(self.inode_idx)
inode_usage_bitmap_offset = self.volume.group_descriptors[group_idx].bg_inode_bitmap * self.volume.block_size
inode_usage_byte = self.volume.read(inode_usage_bitmap_offset + bitmap_bit // 8, 1)[0]
return ((inode_usage_byte >> (7 - bitmap_bit % 8)) & 1) != 0
@property
def mode_str(self):
special_flag = lambda letter, execute, special: {
(False, False): "-",
(False, True): letter.upper(),
(True, False): "x",
(True, True): letter.lower()
}[(execute, special)]
try:
if (self.volume.superblock.s_feature_incompat & ext4_superblock.INCOMPAT_FILETYPE) == 0:
device_type = {
ext4_inode.S_IFIFO: "p",
ext4_inode.S_IFCHR: "c",
ext4_inode.S_IFDIR: "d",
ext4_inode.S_IFBLK: "b",
ext4_inode.S_IFREG: "-",
ext4_inode.S_IFLNK: "l",
ext4_inode.S_IFSOCK: "s",
}[self.inode.i_mode & 0xF000]
else:
device_type = {
InodeType.FILE: "-",
InodeType.DIRECTORY: "d",
InodeType.CHARACTER_DEVICE: "c",
InodeType.BLOCK_DEVICE: "b",
InodeType.FIFO: "p",
InodeType.SOCKET: "s",
InodeType.SYMBOLIC_LINK: "l"
}[self.file_type]
except KeyError:
device_type = "?"
return "".join([
device_type,
"r" if (self.inode.i_mode & ext4_inode.S_IRUSR) != 0 else "-",
"w" if (self.inode.i_mode & ext4_inode.S_IWUSR) != 0 else "-",
special_flag("s", (self.inode.i_mode & ext4_inode.S_IXUSR) != 0,
(self.inode.i_mode & ext4_inode.S_ISUID) != 0),
"r" if (self.inode.i_mode & ext4_inode.S_IRGRP) != 0 else "-",
"w" if (self.inode.i_mode & ext4_inode.S_IWGRP) != 0 else "-",
special_flag("s", (self.inode.i_mode & ext4_inode.S_IXGRP) != 0,
(self.inode.i_mode & ext4_inode.S_ISGID) != 0),
"r" if (self.inode.i_mode & ext4_inode.S_IROTH) != 0 else "-",
"w" if (self.inode.i_mode & ext4_inode.S_IWOTH) != 0 else "-",
special_flag("t", (self.inode.i_mode & ext4_inode.S_IXOTH) != 0,
(self.inode.i_mode & ext4_inode.S_ISVTX) != 0),
])
def open_dir(self, decode_name=None):
# Parse args
if decode_name is None:
decode_name = lambda raw: raw.decode("utf8")
if not self.volume.ignore_flags and not self.is_dir:
raise Ext4Error("Inode ({inode:d}) is not a directory.".format(inode=self.inode_idx))
# # Hash trees are compatible with linear arrays
if (self.inode.i_flags & ext4_inode.EXT4_INDEX_FL) != 0:
pass
# Read raw directory content
raw_data = self.open_read().read()
offset = 0
while offset < len(raw_data):
dirent = ext4_dir_entry_2._from_buffer_copy(raw_data, offset, platform64=self.volume.platform64)
if dirent.file_type != InodeType.CHECKSUM:
yield decode_name(dirent.name), dirent.inode, dirent.file_type
offset += dirent.rec_len
def open_read(self):
if (self.inode.i_flags & ext4_inode.EXT4_EXTENTS_FL) != 0:
# Obtain mapping from extents
mapping = [] # List of MappingEntry instances
nodes = queue.Queue()
nodes.put_nowait(self.offset + ext4_inode.i_block.offset)
while nodes.qsize() != 0:
header_offset = nodes.get_nowait()
header = self.volume.read_struct(ext4_extent_header, header_offset)
if not self.volume.ignore_magic and header.eh_magic != 0xF30A:
raise MagicError(
"Invalid magic value in extent header at offset 0x{header_offset:X} of inode {inode:d}: 0x{header_magic:04X} (expected 0xF30A)".format(
header_magic=header.eh_magic,
header_offset=header_offset,
inode=self.inode_idx
))
if header.eh_depth != 0:
indices = self.volume.read_struct(ext4_extent_idx * header.eh_entries,
header_offset + ctypes.sizeof(ext4_extent_header))
for idx in indices: nodes.put_nowait(idx.ei_leaf * self.volume.block_size)
else:
extents = self.volume.read_struct(ext4_extent * header.eh_entries,
header_offset + ctypes.sizeof(ext4_extent_header))
for extent in extents:
mapping.append(MappingEntry(extent.ee_block, extent.ee_start, extent.ee_len))
MappingEntry.optimize(mapping)
return BlockReader(self.volume, len(self), mapping)
else:
# Inode uses inline data
i_block = self.volume.read(self.offset + ext4_inode.i_block.offset, ext4_inode.i_block.size)
return io.BytesIO(i_block[:self.inode.i_size])
@property
def size_readable(self):
if self.inode.i_size < 1024:
return "{0:d} bytes".format(self.inode.i_size) if self.inode.i_size != 1 else "1 byte"
else:
units = ["KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]
unit_idx = min(int(math.log(self.inode.i_size, 1024)), len(units))
return "{size:.2f} {unit:s}".format(
size=self.inode.i_size / (1024 ** unit_idx),
unit=units[unit_idx - 1]
)
def xattrs(self, check_inline=True, check_block=True, force_inline=False, prefix_override: dict = None):
# Inline xattrs
inline_data_offset = self.offset + ext4_inode.EXT2_GOOD_OLD_INODE_SIZE + self.inode.i_extra_isize
inline_data_length = self.offset + self.volume.superblock.s_inode_size - inline_data_offset
if check_inline and inline_data_length > ctypes.sizeof(ext4_xattr_ibody_header):
inline_data = self.volume.read(inline_data_offset, inline_data_length)
xattrs_header = ext4_xattr_ibody_header.from_buffer_copy(inline_data)
# TODO Find way to detect inline xattrs without checking the h_magic field to enable error detection with the h_magic field.
if force_inline or xattrs_header.h_magic == 0xEA020000:
offset = 4 * ((ctypes.sizeof(
ext4_xattr_ibody_header) + 3) // 4) # The ext4_xattr_entry following the header is aligned on a 4-byte boundary
try:
for xattr_name, xattr_value in self._parse_xattrs(inline_data[offset:], 0,
prefix_override=prefix_override):
yield xattr_name, xattr_value
except:
pass
# xattr block(s)
if check_block and self.inode.i_file_acl != 0:
xattrs_block_start = self.inode.i_file_acl * self.volume.block_size
xattrs_block = self.volume.read(xattrs_block_start, self.volume.block_size)
xattrs_header = ext4_xattr_header.from_buffer_copy(xattrs_block)
if not self.volume.ignore_magic and xattrs_header.h_magic != 0xEA020000:
try:
raise MagicError(
"Invalid magic value in xattrs block header at offset 0x{xattrs_block_start:X} of inode {inode:d}: 0x{xattrs_header} (expected 0xEA020000)".format(
inode=self.inode_idx,
xattrs_block_start=xattrs_block_start,
xattrs_header=xattrs_header.h_magic
))
except:
pass
if xattrs_header.h_blocks != 1:
raise Ext4Error(
"Invalid number of xattr blocks at offset 0x{xattrs_block_start:X} of inode {inode:d}: {xattrs_header:d} (expected 1)".format(
inode=self.inode_idx,
xattrs_header=xattrs_header.h_blocks,
xattrs_block_start=xattrs_block_start
))
offset = 4 * ((ctypes.sizeof(
ext4_xattr_header) + 3) // 4) # The ext4_xattr_entry following the header is aligned on a 4-byte boundary
for xattr_name, xattr_value in self._parse_xattrs(xattrs_block[offset:], -offset,
prefix_override=prefix_override):
yield xattr_name, xattr_value
class BlockReader:
# OSError
EINVAL = 22
def __init__(self, volume, byte_size, block_map):
self.byte_size = byte_size
self.volume = volume
self.cursor = 0
block_map = list(map(MappingEntry.copy, block_map))
# Optimize mapping (stitch together)
MappingEntry.optimize(block_map)
self.block_map = block_map
def __repr__(self):
return "{type_name:s}(byte_size = {size!r:s}, block_map = {block_map!r:s}, volume_uuid = {uuid!r:s})".format(
block_map=self.block_map,
size=self.byte_size,
type_name=type(self).__name__,
uuid=self.volume.uuid
)
def get_block_mapping(self, file_block_idx):
disk_block_idx = None
# Find disk block
for entry in self.block_map:
if entry.file_block_idx <= file_block_idx < entry.file_block_idx + entry.block_count:
block_diff = file_block_idx - entry.file_block_idx
disk_block_idx = entry.disk_block_idx + block_diff
break
return disk_block_idx
def read(self, byte_len=-1):
# Parse args
if byte_len < -1:
raise ValueError("byte_len must be non-negative or -1")
bytes_remaining = self.byte_size - self.cursor
byte_len = bytes_remaining if byte_len == -1 else max(0, min(byte_len, bytes_remaining))
if byte_len == 0:
return b""
# Reading blocks
start_block_idx = self.cursor // self.volume.block_size
end_block_idx = (self.cursor + byte_len - 1) // self.volume.block_size
end_of_stream_check = byte_len
blocks = [self.read_block(i) for i in range(start_block_idx, end_block_idx + 1)]
start_offset = self.cursor % self.volume.block_size
if start_offset != 0:
blocks[0] = blocks[0][start_offset:]
byte_len = (byte_len + start_offset - self.volume.block_size - 1) % self.volume.block_size + 1
blocks[-1] = blocks[-1][:byte_len]
result = b"".join(blocks)
# Check read
if len(result) != end_of_stream_check:
raise EndOfStreamError(
"The volume's underlying stream ended {0:d} bytes before EOF.".format(byte_len - len(result)))
self.cursor += len(result)
return result
def read_block(self, file_block_idx):
disk_block_idx = self.get_block_mapping(file_block_idx)
if disk_block_idx is not None:
return self.volume.read(disk_block_idx * self.volume.block_size, self.volume.block_size)
else:
return bytes([0] * self.volume.block_size)
def seek(self, seek, seek_mode=io.SEEK_SET):
if seek_mode == io.SEEK_CUR:
seek += self.cursor
elif seek_mode == io.SEEK_END:
seek += self.byte_size
# elif seek_mode == io.SEEK_SET:
# seek += 0
if seek < 0:
raise OSError(BlockReader.EINVAL, "Invalid argument") # Exception behavior copied from IOBase.seek
self.cursor = seek
return seek
def tell(self):
return self.cursor
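Putting Volume, Inode and BlockReader together, a hedged end-to-end sketch; the image name and the etc/hosts path are examples only:

with open("system.img", "rb") as f:
    root = Volume(f).root
    inode = root.get_inode("etc", "hosts")
    print(inode.mode_str, inode.size_readable)
    data = inode.open_read().read()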

122
fspatch.py Normal file

@@ -0,0 +1,122 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
def scanfs(file) -> dict:
filesystem_config = {}
with open(file, "r") as file_:
for i in file_.readlines():
filepath, *other = i.strip().split()
filesystem_config[filepath] = other
if (long := len(other)) > 4:
print(f"[Warn] {filepath} has too many fields ({long}).")
return filesystem_config
def scan_dir(folder) -> list: # Walk the unpacked directory; returns False if the host OS is unsupported
allfiles = ['/']
if os.name == 'nt':
allfiles.append(os.path.basename(folder).replace('\\', ''))
elif os.name == 'posix':
allfiles.append(os.path.basename(folder).replace('/', ''))
else:
return False
for root, dirs, files in os.walk(folder, topdown=True):
for dir_ in dirs:
if os.name == 'nt':
allfiles.append(os.path.join(root, dir_).replace(folder, os.path.basename(folder)).replace('\\', '/'))
elif os.name == 'posix':
allfiles.append(os.path.join(root, dir_).replace(folder, os.path.basename(folder)))
for file in files:
if os.name == 'nt':
allfiles.append(os.path.join(root, file).replace(folder, os.path.basename(folder)).replace('\\', '/'))
elif os.name == 'posix':
allfiles.append(os.path.join(root, file).replace(folder, os.path.basename(folder)))
return allfiles
def islink(file): # Return the symlink target as a str, or None if the path is not a symlink
if os.name == 'nt':
if not os.path.isdir(file):
with open(file, 'rb') as f:
if f.read(12) == b'!<symlink>\xff\xfe':
return f.read().decode("utf-8").replace('\x00', '')
else:
return
elif os.name == 'posix':
if os.path.islink(file):
return os.readlink(file)
else:
return
def fs_patch(fs_file, filename, dir_path) -> dict: # Compare the original fs_config against the scanned paths and add missing entries
new_fs = {}
for i in filename:
if fs_file.get(i):
new_fs[i] = fs_file[i]
else:
if os.name == 'nt':
filepath = os.path.abspath(dir_path + os.sep + ".." + os.sep + i.replace('/', '\\'))
elif os.name == 'posix':
filepath = os.path.abspath(dir_path + os.sep + ".." + os.sep + i)
else:
filepath = os.path.abspath(dir_path + os.sep + ".." + os.sep + i)
if os.path.isdir(filepath):
uid = '0'
if "system/bin" in i or "system/xbin" in i or "vendor/bin" in i:
gid = '2000'
else:
gid = '0'
mode = '0755' # dir path always 755
config = [uid, gid, mode]
elif islink(filepath):
uid = '0'
if ("system/bin" in i) or ("system/xbin" in i) or ("vendor/bin" in i):
gid = '2000'
else:
gid = '0'
if ("/bin" in i) or ("/xbin" in i):
mode = '0755'
elif ".sh" in i:
mode = "0750"
else:
mode = "0644"
link = islink(filepath)
config = [uid, gid, mode, link]
elif ("/bin" in i) or ("/xbin" in i):
uid = '0'
mode = '0755'
if ("system/bin" in i) or ("system/xbin" in i) or ("vendor/bin" in i):
gid = '2000'
else:
gid = '0'
mode = '0755'
if ".sh" in i:
mode = "0750"
else:
for s in ["/bin/su", "/xbin/su", "disable_selinux.sh", "daemon", "ext/.su", "install-recovery",
'installed_su', 'bin/rw-system.sh', 'bin/getSPL']:
if s in i:
mode = "0755"
config = [uid, gid, mode]
else:
uid = '0'
gid = '0'
mode = '0644'
config = [uid, gid, mode]
print(f'Add [{i}{config}]')
new_fs[i] = config
return new_fs
def main(dir_path, fs_config) -> None:
origin_fs = scanfs(os.path.abspath(fs_config))
allfiles = scan_dir(os.path.abspath(dir_path))
new_fs = fs_patch(origin_fs, allfiles, dir_path)
with open(fs_config, "w", encoding='utf-8', newline='\n') as f:
f.writelines([i + " " + " ".join(new_fs[i]) + "\n" for i in sorted(new_fs.keys())])
print("Load origin %d" % (len(origin_fs.keys())) + " entries")
print("Detect total %d" % (len(allfiles)) + " entries")
print('Add %d' % (len(new_fs.keys()) - len(origin_fs.keys())) + " entries")
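As with contextpatch.py, a minimal usage sketch; both paths are hypothetical examples:

import fspatch
# Refresh the fs_config dump after editing the unpacked ./system tree.
fspatch.main("./system", "./config/system_fs_config")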

673
imgextractor.py Normal file

@@ -0,0 +1,673 @@
import mmap
import os
import re
import shutil
import struct
import subprocess
import traceback
from timeit import default_timer as dti
EXT4_HEADER_MAGIC = 0xED26FF3A
EXT4_SPARSE_HEADER_LEN = 28
EXT4_CHUNK_HEADER_SIZE = 12
class ext4_file_header(object):
def __init__(self, buf):
(self.magic,
self.major,
self.minor,
self.file_header_size,
self.chunk_header_size,
self.block_size,
self.total_blocks,
self.total_chunks,
self.crc32) = struct.unpack('<I4H4I', buf)
class ext4_chunk_header(object):
def __init__(self, buf):
(self.type,
self.reserved,
self.chunk_size,
self.total_size) = struct.unpack('<2H2I', buf)
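A small self-check of the 28-byte Android sparse-image header these two classes unpack; all field values below are invented for illustration:

import struct
# magic, major, minor, file_header_size, chunk_header_size, block_size, total_blocks, total_chunks, crc32
raw = struct.pack('<I4H4I', 0xED26FF3A, 1, 0, 28, 12, 4096, 8, 1, 0)
header = ext4_file_header(raw)
assert header.magic == EXT4_HEADER_MAGIC and header.block_size == 4096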
class Extractor(object):
def __init__(self):
self.FileName = ""
self.BASE_DIR = ""
self.OUTPUT_IMAGE_FILE = ""
self.EXTRACT_DIR = ""
self.BLOCK_SIZE = 4096
self.TYPE_IMG = 'system'
self.context = []
self.fsconfig = []
def __remove(self, path):
if os.path.isfile(path):
os.remove(path) # remove the file
elif os.path.isdir(path):
shutil.rmtree(path) # remove dir and all contains
else:
raise ValueError("file {} is not a file or dir.".format(path))
def __logtb(self, ex, ex_traceback=None):
if ex_traceback is None:
ex_traceback = ex.__traceback__
tb_lines = [line.rstrip('\n') for line in
traceback.format_exception(ex.__class__, ex, ex_traceback)]
return '\n'.join(tb_lines)
def __file_name(self, file_path):
name = os.path.basename(file_path).rsplit('.', 1)[0]
name = name.split('-')[0]
name = name.split(' ')[0]
name = name.split('+')[0]
name = name.split('{')[0]
name = name.split('(')[0]
return name
def __out_name(self, file_path):
name = file_path
name = name.split('-')[0]
name = name.split(' ')[0]
name = name.split('+')[0]
name = name.split('{')[0]
name = name.split('(')[0]
return name
def __appendf(self, msg, log_file):
with open(log_file, 'a', newline='\n') as file:
print(msg, file=file)
def __getperm(self, arg):
if len(arg) < 9 or len(arg) > 10:
return
if len(arg) == 10:  # drop the leading device-type char ('-', 'd', 'l', ...)
arg = arg[1:]
oor, ow, ox, gr, gw, gx, wr, ww, wx = list(arg)
o, g, w, s = 0, 0, 0, 0
if oor == 'r': o += 4
if ow == 'w': o += 2
if ox == 'x': o += 1
if ox == 'S': s += 4
if ox == 's': s += 4; o += 1
if gr == 'r': g += 4
if gw == 'w': g += 2
if gx == 'x': g += 1
if gx == 'S': s += 2
if gx == 's': s += 2; g += 1
if wr == 'r': w += 4
if ww == 'w': w += 2
if wx == 'x': w += 1
if wx == 'T': s += 1
if wx == 't': s += 1; w += 1
return str(s) + str(o) + str(g) + str(w)
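Worked examples of the mapping above (setuid/setgid/sticky land in the leading digit):

#   'rwxr-xr-x'  -> '0755'   no special bits
#   '-rwsr-xr-x' -> '4755'   setuid: ox == 's' adds 4 to s and 1 to o
#   'rwxrwxrwt'  -> '1777'   sticky: wx == 't' adds 1 to s and 1 to w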
def __ext4extractor(self):
import ext4, string, struct
fs_config_file = self.FileName + '_fs_config'
fuking_symbols = '\\^$.|?*+(){}[]'
contexts = self.CONFING_DIR + os.sep + self.FileName + "_file_contexts" # 08.05.18
def scan_dir(root_inode, root_path=""):
for entry_name, entry_inode_idx, entry_type in root_inode.open_dir():
if entry_name in ['.', '..'] or entry_name.endswith(' (2)'):
continue
entry_inode = root_inode.volume.get_inode(entry_inode_idx, entry_type)
entry_inode_path = root_path + '/' + entry_name
mode = self.__getperm(entry_inode.mode_str)
uid = entry_inode.inode.i_uid
gid = entry_inode.inode.i_gid
con = ''
cap = ''
for i in list(entry_inode.xattrs()):
if i[0] == 'security.selinux':
con = i[1].decode('utf8')[:-1]
elif i[0] == 'security.capability':
raw_cap = struct.unpack("<5I", i[1])
if raw_cap[1] > 65535:
cap = '' + str(hex(int('%04x%04x' % (raw_cap[3], raw_cap[1]), 16)))
else:
cap = '' + str(hex(int('%04x%04x%04x' % (raw_cap[3], raw_cap[2], raw_cap[1]), 16)))
cap = ' capabilities={cap}'.format(cap=cap)
if entry_inode.is_dir:
dir_target = self.EXTRACT_DIR + entry_inode_path.replace(' ', '_').replace('"', '')
if not os.path.isdir(dir_target):
os.makedirs(dir_target)
if os.name == 'posix' and os.geteuid() == 0:
os.chmod(dir_target, int(mode, 8))
os.chown(dir_target, uid, gid)
scan_dir(entry_inode, entry_inode_path)
if cap == '' and con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode))
else:
self.fsconfig.append('%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode))
else:
if cap == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode))
else:
self.fsconfig.append('%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
else:
if con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode + cap))
else:
self.fsconfig.append(
'%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode + cap))
else:
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode + cap))
else:
self.fsconfig.append(
'%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode + cap))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
elif entry_inode.is_file:
wdone = None
if os.name == 'nt':
if entry_name.endswith('/'):
entry_name = entry_name[:-1]
file_target = self.EXTRACT_DIR + entry_inode_path.replace('/', os.sep).replace(' ', '_').replace('"', '')
elif os.name == "posix":
file_target = self.EXTRACT_DIR + entry_inode_path.replace(' ', '_').replace('"', '')
try:
with open(file_target, 'wb') as out:
out.write(entry_inode.open_read().read())
except:
print(f'ERROR: Cannot write {file_target}')
if os.name == 'posix':
if os.geteuid() == 0:
os.chmod(file_target, int(mode, 8))
os.chown(file_target, uid, gid)
if cap == '' and con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode))
else:
self.fsconfig.append('%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode))
else:
if cap == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode))
else:
self.fsconfig.append('%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
else:
if con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode + cap))
else:
self.fsconfig.append(
'%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode + cap))
else:
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s' % (tmppath, uid, gid, mode + cap))
else:
self.fsconfig.append(
'%s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode + cap))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
elif entry_inode.is_symlink:
try:
link_target = entry_inode.open_read().read().decode("utf8")
target = self.EXTRACT_DIR + entry_inode_path.replace(' ', '_')
if cap == '' and con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s %s' % (tmppath, uid, gid, mode, link_target))
else:
self.fsconfig.append(
'%s %s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode, link_target))
else:
if cap == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s %s' % (tmppath, uid, gid, mode, link_target))
else:
self.fsconfig.append(
'%s %s %s %s %s' % (self.DIR + entry_inode_path, uid, gid, mode, link_target))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
else:
if con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append(
'%s %s %s %s %s' % (tmppath, uid, gid, mode + cap, link_target))
else:
self.fsconfig.append('%s %s %s %s %s' % (
self.DIR + entry_inode_path, uid, gid, mode + cap, link_target))
else:
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append(
'%s %s %s %s %s' % (tmppath, uid, gid, mode + cap, link_target))
else:
self.fsconfig.append('%s %s %s %s %s' % (
self.DIR + entry_inode_path, uid, gid, mode + cap, link_target))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
if os.path.islink(target):
try:
os.remove(target)
except:
pass
if os.path.isfile(target):
try:
os.remove(target)
except:
pass
if os.name == 'posix':
os.symlink(link_target, target)
if os.name == 'nt':
with open(target.replace('/', os.sep), 'wb') as out:
tmp = bytes.fromhex('213C73796D6C696E6B3EFFFE')
for index in list(link_target):
tmp = tmp + struct.pack('>sx', index.encode('utf-8'))
out.write(tmp + struct.pack('xx'))
subprocess.Popen('attrib +s "%s"' % target.replace('/', os.sep), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not all(c in string.printable for c in link_target):
pass
if entry_inode_path[1:] == entry_name or link_target[1:] == entry_name:
self.symlinks.append('%s %s' % (link_target, entry_inode_path[1:]))
else:
self.symlinks.append('%s %s' % (link_target, self.DIR + entry_inode_path))
except:
try:
link_target_block = int.from_bytes(entry_inode.open_read().read(), "little")
link_target = root_inode.volume.read(link_target_block * root_inode.volume.block_size,
entry_inode.inode.i_size).decode("utf8")
target = self.EXTRACT_DIR + entry_inode_path.replace(' ', '_')
if link_target and all(c in string.printable for c in link_target):
if cap == '' and con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append('%s %s %s %s %s' % (tmppath, uid, gid, mode, link_target))
else:
self.fsconfig.append('%s %s %s %s %s' % (
self.DIR + entry_inode_path, uid, gid, mode, link_target))
else:
if cap == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append(
'%s %s %s %s %s' % (tmppath, uid, gid, mode, link_target))
else:
self.fsconfig.append('%s %s %s %s %s' % (
self.DIR + entry_inode_path, uid, gid, mode, link_target))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
else:
if con == '':
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append(
'%s %s %s %s %s' % (tmppath, uid, gid, mode + cap, link_target))
else:
self.fsconfig.append('%s %s %s %s %s' % (
self.DIR + entry_inode_path, uid, gid, mode + cap, link_target))
else:
tmppath = self.DIR + entry_inode_path
if tmppath.find(' ', 1, len(tmppath)) > 0:
spaces_file = self.BASE_MYDIR + 'config' + os.sep + self.FileName + '_space.txt'
if not os.path.isfile(spaces_file):
f = open(spaces_file, 'tw', encoding='utf-8')
self.__appendf(tmppath, spaces_file)
f.close()
else:
self.__appendf(tmppath, spaces_file)
tmppath = tmppath.replace(' ', '_')
self.fsconfig.append(
'%s %s %s %s %s' % (tmppath, uid, gid, mode + cap, link_target))
else:
self.fsconfig.append('%s %s %s %s %s' % (
self.DIR + entry_inode_path, uid, gid, mode + cap, link_target))
for fuk_symb in fuking_symbols:
tmppath = tmppath.replace(fuk_symb, '\\' + fuk_symb)
self.context.append('/%s %s' % (tmppath, con))
if os.name == 'posix':
os.symlink(link_target, target)
if os.name == 'nt':
with open(target.replace('/', os.sep), 'wb') as out:
tmp = bytes.fromhex('213C73796D6C696E6B3EFFFE')
for index in list(link_target):
tmp = tmp + struct.pack('>sx', index.encode('utf-8'))
out.write(tmp + struct.pack('xx'))
else:
pass
except:
pass
dir_my = self.CONFING_DIR + os.sep
if not os.path.isdir(dir_my):
os.makedirs(dir_my)
f = open(dir_my + self.FileName + '_size.txt', 'tw', encoding='utf-8')
self.__appendf(os.path.getsize(self.OUTPUT_IMAGE_FILE), dir_my + self.FileName + '_size.txt')
f.close()
with open(self.OUTPUT_IMAGE_FILE, 'rb') as file:
root = ext4.Volume(file).root
dirlist = []
for file_name, inode_idx, file_type in root.open_dir():
dirlist.append(file_name)
dirr = self.__out_name(os.path.basename(self.OUTPUT_IMAGE_FILE).rsplit('.', 1)[0]) # 11.05.18
setattr(self, 'DIR', dirr)
scan_dir(root)
for c in self.fsconfig:
if dirr == 'vendor':
self.fsconfig.insert(0, '/ 0 2000 0755')
self.fsconfig.insert(1, dirr + ' 0 2000 0755')
elif dirr == 'system':
self.fsconfig.insert(0, '/' + ' 0 0 0755')
self.fsconfig.insert(1, '/' + 'lost+found' + ' 0 0 0700')
self.fsconfig.insert(2, dirr + ' 0 0 0755')
else:
self.fsconfig.insert(0, '/' + ' 0 0 0755')
self.fsconfig.insert(1, dirr + ' 0 0 0755')
break
self.__appendf('\n'.join(self.fsconfig), self.CONFING_DIR + os.sep + fs_config_file)
if self.context: # 11.05.18
self.context.sort() # 11.05.18
for c in self.context:
if re.search('lost..found', c):
self.context.insert(0, '/' + ' ' + c.split(" ")[1])
self.context.insert(1, '/' + dirr + '(/.*)? ' + c.split(" ")[1])
self.context.insert(2, '/' + dirr + ' ' + c.split(" ")[1])
self.context.insert(3, '/' + dirr + '/lost+\\found' + ' ' + c.split(" ")[1])
break
for c in self.context:
if re.search('/system/system/build..prop ', c):
self.context.insert(3, '/lost+\\found' + ' u:object_r:rootfs:s0')
self.context.insert(4, '/' + dirr + '/' + dirr + '(/.*)? ' + c.split(" ")[1])
break
self.__appendf('\n'.join(self.context), contexts) # 11.05.18
def converSimgToImg(self, target):
with open(target, "rb") as img_file:
            self.sign_offset = self.checkSignOffset(img_file)
if self.sign_offset > 0:
img_file.seek(self.sign_offset, 0)
header = ext4_file_header(img_file.read(28))
total_chunks = header.total_chunks
if header.file_header_size > EXT4_SPARSE_HEADER_LEN:
img_file.seek(header.file_header_size - EXT4_SPARSE_HEADER_LEN, 1)
with open(target.replace(".img", ".raw.img"), "wb") as raw_img_file:
sector_base = 82528
output_len = 0
while total_chunks > 0:
chunk_header = ext4_chunk_header(img_file.read(EXT4_CHUNK_HEADER_SIZE))
sector_size = (chunk_header.chunk_size * header.block_size) >> 9
chunk_data_size = chunk_header.total_size - header.chunk_header_size
                    if chunk_header.type == 0xCAC1:  # CHUNK_TYPE_RAW
                        if header.chunk_header_size > EXT4_CHUNK_HEADER_SIZE:
                            img_file.seek(header.chunk_header_size - EXT4_CHUNK_HEADER_SIZE, 1)
                        data = img_file.read(chunk_data_size)
                        len_data = len(data)
                        if len_data == (sector_size << 9):
                            raw_img_file.write(data)
                            output_len += len_data
                            sector_base += sector_size
                    elif chunk_header.type == 0xCAC2:  # CHUNK_TYPE_FILL: 4 bytes of pattern
                        if header.chunk_header_size > EXT4_CHUNK_HEADER_SIZE:
                            img_file.seek(header.chunk_header_size - EXT4_CHUNK_HEADER_SIZE, 1)
                        data = img_file.read(chunk_data_size)
                        len_data = sector_size << 9
                        # Replicate the 4-byte fill pattern instead of assuming zeros.
                        fill = data[:4] if len(data) >= 4 else b'\x00' * 4
                        raw_img_file.write(fill * (len_data // 4))
                        output_len += len_data
                        sector_base += sector_size
                    elif chunk_header.type == 0xCAC3:  # CHUNK_TYPE_DONT_CARE
                        if header.chunk_header_size > EXT4_CHUNK_HEADER_SIZE:
                            img_file.seek(header.chunk_header_size - EXT4_CHUNK_HEADER_SIZE, 1)
                        data = img_file.read(chunk_data_size)
                        len_data = sector_size << 9
                        raw_img_file.write(struct.pack("B", 0) * len_data)
                        output_len += len(data)
                        sector_base += sector_size
                    else:
                        len_data = sector_size << 9
                        raw_img_file.write(struct.pack("B", 0) * len_data)
                        sector_base += sector_size
                    total_chunks -= 1
self.OUTPUT_IMAGE_FILE = target.replace(".img", ".raw.img")
@staticmethod
def fixmoto(input_file):
if not os.path.exists(input_file):
return
output_file = input_file + "_"
if os.path.exists(output_file):
try:
os.remove(output_file)
            except OSError:
                pass
with open(input_file, 'rb') as f:
data = f.read(500000)
moto = re.search(b'\x4d\x4f\x54\x4f', data)
if not moto:
return
result = []
for i in re.finditer(b'\x53\xEF', data):
result.append(i.start() - 1080)
offset = 0
for i in result:
if data[i] == 0:
offset = i
break
if offset > 0:
with open(output_file, 'wb') as o, open(input_file, 'rb') as f:
                # Skip everything before the detected superblock and copy the
                # remainder of the image in 15 KiB chunks.
                f.seek(offset)
                while True:
                    data = f.read(15360)
                    if not data:
                        break
                    o.write(data)
try:
os.remove(input_file)
os.rename(output_file, input_file)
            except OSError:
                pass
    def checkSignOffset(self, file):
        # Scan at most the first 50 MiB (52428800 bytes) for the sparse magic.
        size = os.stat(file.name).st_size
        length = 0 if size <= 52428800 else 52428800  # 0 maps the whole file
        with mmap.mmap(file.fileno(), length, access=mmap.ACCESS_READ) as mm:
            return mm.find(struct.pack('<L', EXT4_HEADER_MAGIC))
def __getTypeTarget(self, target):
filename, file_extension = os.path.splitext(target)
if file_extension == '.img':
with open(target, "rb") as img_file:
                self.sign_offset = self.checkSignOffset(img_file)
if self.sign_offset > 0:
img_file.seek(self.sign_offset, 0)
header = ext4_file_header(img_file.read(28))
if header.magic != EXT4_HEADER_MAGIC:
return 'img'
else:
return 'simg'
def main(self, target, output_dir, work):
self.BASE_DIR = (os.path.realpath(os.path.dirname(target)) + os.sep)
self.BASE_MYDIR = output_dir + os.sep
self.EXTRACT_DIR = os.path.realpath(os.path.dirname(output_dir)) + os.sep + self.__out_name(
os.path.basename(output_dir)) # output_dir
self.OUTPUT_IMAGE_FILE = self.BASE_DIR + os.path.basename(target)
self.OUTPUT_MYIMAGE_FILE = os.path.basename(target)
self.MYFileName = os.path.basename(self.OUTPUT_IMAGE_FILE).replace(".img", "")
self.FileName = self.__file_name(os.path.basename(target))
        # Detect sparse vs. raw instead of hardcoding 'img', so the simg path runs.
        target_type = self.__getTypeTarget(target)
        self.CONFING_DIR = work + os.sep + 'config'
        if target_type == 'simg':
            print(".....Converting %s to %s" % (
                os.path.basename(target), os.path.basename(target).replace(".img", ".raw.img")))
            self.converSimgToImg(target)
        with open(os.path.abspath(self.OUTPUT_IMAGE_FILE), 'rb') as f:
            data = f.read(500000)
        if re.search(b'\x4d\x4f\x54\x4f', data):
            print(".....Found MOTO structure! Fixing.....")
            self.fixmoto(os.path.abspath(self.OUTPUT_IMAGE_FILE))
        print("Extracting %s --> %s" % (os.path.basename(target), os.path.basename(self.EXTRACT_DIR)))
        start = dti()
        self.__ext4extractor()
        print("Done! [%s]" % (dti() - start))

946
lpunpack.py Normal file
View File

@ -0,0 +1,946 @@
import argparse
import copy
import enum
import io
import json
import re
import struct
import sys
from dataclasses import dataclass, field
import os
from string import Template
from typing import IO, Dict, List, TypeVar, cast, BinaryIO, Tuple
from timeit import default_timer as dti
SPARSE_HEADER_MAGIC = 0xED26FF3A
SPARSE_HEADER_SIZE = 28
SPARSE_CHUNK_HEADER_SIZE = 12
LP_PARTITION_RESERVED_BYTES = 4096
LP_METADATA_GEOMETRY_MAGIC = 0x616c4467
LP_METADATA_GEOMETRY_SIZE = 4096
LP_METADATA_HEADER_MAGIC = 0x414C5030
LP_SECTOR_SIZE = 512
LP_TARGET_TYPE_LINEAR = 0
LP_TARGET_TYPE_ZERO = 1
LP_PARTITION_ATTR_READONLY = (1 << 0)
LP_PARTITION_ATTR_SLOT_SUFFIXED = (1 << 1)
LP_PARTITION_ATTR_UPDATED = (1 << 2)
LP_PARTITION_ATTR_DISABLED = (1 << 3)
LP_BLOCK_DEVICE_SLOT_SUFFIXED = (1 << 0)
LP_GROUP_SLOT_SUFFIXED = (1 << 0)
PLAIN_TEXT_TEMPLATE = """Slot 0:
Metadata version: $metadata_version
Metadata size: $metadata_size bytes
Metadata max size: $metadata_max_size bytes
Metadata slot count: $metadata_slot_count
Header flags: $header_flags
Partition table:
------------------------
$partitions
------------------------
Super partition layout:
------------------------
$layouts
------------------------
Block device table:
------------------------
$blocks
------------------------
Group table:
------------------------
$groups
"""
def build_attribute_string(attributes: int) -> str:
if attributes & LP_PARTITION_ATTR_READONLY:
result = "readonly"
elif attributes & LP_PARTITION_ATTR_SLOT_SUFFIXED:
result = "slot-suffixed"
elif attributes & LP_PARTITION_ATTR_UPDATED:
result = "updated"
elif attributes & LP_PARTITION_ATTR_DISABLED:
result = "disabled"
else:
result = "none"
return result
def build_block_device_flag_string(flags: int) -> str:
return "slot-suffixed" if (flags & LP_BLOCK_DEVICE_SLOT_SUFFIXED) else "none"
def build_group_flag_string(flags: int) -> str:
return "slot-suffixed" if (flags & LP_GROUP_SLOT_SUFFIXED) else "none"
class FormatType(enum.Enum):
TEXT = "text"
JSON = "json"
class EnumAction(argparse.Action):
"""Argparse action for handling Enums"""
def __init__(self, **kwargs):
enum_type = kwargs.pop("type", None)
if enum_type is None:
raise ValueError("Type must be assigned an Enum when using EnumAction")
if not issubclass(enum_type, enum.Enum):
raise TypeError("Type must be an Enum when using EnumAction")
kwargs.setdefault("choices", tuple(e.value for e in enum_type))
super(EnumAction, self).__init__(**kwargs)
self._enum = enum_type
def __call__(self, parser, namespace, values, option_string=None):
value = self._enum(values)
setattr(namespace, self.dest, value)
class ShowJsonInfo(json.JSONEncoder):
def __init__(self, ignore_keys: List[str], **kwargs):
super().__init__(**kwargs)
self._ignore_keys = ignore_keys
def _remove_ignore_keys(self, data: Dict):
_data = copy.deepcopy(data)
for field_key, v in data.items():
if field_key in self._ignore_keys:
_data.pop(field_key)
continue
if v == 0:
_data.pop(field_key)
continue
if isinstance(v, int) and not isinstance(v, bool):
_data.update({field_key: str(v)})
return _data
def encode(self, data: Dict) -> str:
result = {
"partitions": list(map(self._remove_ignore_keys, data["partition_table"])),
"groups": list(map(self._remove_ignore_keys, data["group_table"])),
"block_devices": list(map(self._remove_ignore_keys, data["block_devices"]))
}
return super().encode(result)
class SparseHeader(object):
def __init__(self, buffer):
fmt = '<I4H4I'
(
self.magic, # 0xed26ff3a
self.major_version, # (0x1) - reject images with higher major versions
            self.minor_version,  # (0x0) - allow images with higher minor versions
self.file_hdr_sz, # 28 bytes for first revision of the file format
self.chunk_hdr_sz, # 12 bytes for first revision of the file format
self.blk_sz, # block size in bytes, must be a multiple of 4 (4096)
self.total_blks, # total blocks in the non-sparse output image
self.total_chunks, # total chunks in the sparse input image
self.image_checksum # CRC32 checksum of the original data, counting "don't care"
) = struct.unpack(fmt, buffer[0:struct.calcsize(fmt)])
class SparseChunkHeader(object):
"""
Following a Raw or Fill or CRC32 chunk is data.
For a Raw chunk, it's the data in chunk_sz * blk_sz.
For a Fill chunk, it's 4 bytes of the fill data.
For a CRC32 chunk, it's 4 bytes of CRC32
"""
def __init__(self, buffer):
fmt = '<2H2I'
(
            self.chunk_type,  # 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care
            self.reserved,
            self.chunk_sz,  # in blocks in output image
            self.total_sz,  # in bytes of chunk input file including chunk header and data
) = struct.unpack(fmt, buffer[0:struct.calcsize(fmt)])
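# A minimal, hedged sketch (not called by the unpacker) of the on-disk layout
# the two header classes above parse: pack a 28-byte header for a made-up
# one-chunk image and read it back.
def _demo_sparse_header_roundtrip() -> bool:
    raw = struct.pack('<I4H4I', SPARSE_HEADER_MAGIC, 1, 0,
                      SPARSE_HEADER_SIZE, SPARSE_CHUNK_HEADER_SIZE,
                      4096, 1, 1, 0)  # blk_sz=4096, 1 block, 1 chunk, crc=0
    return SparseHeader(raw).blk_sz == 4096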
class LpMetadataBase:
_fmt = None
@classmethod
@property
def size(cls) -> int:
return struct.calcsize(cls._fmt)
class LpMetadataGeometry(LpMetadataBase):
"""
Offset 0: Magic signature
Offset 4: Size of the `LpMetadataGeometry`
Offset 8: SHA256 checksum
Offset 40: Maximum amount of space a single copy of the metadata can use
Offset 44: Number of copies of the metadata to keep
Offset 48: Logical block size
"""
_fmt = '<2I32s3I'
def __init__(self, buffer):
(
self.magic,
self.struct_size,
self.checksum,
self.metadata_max_size,
self.metadata_slot_count,
self.logical_block_size
) = struct.unpack(self._fmt, buffer[0:struct.calcsize(self._fmt)])
# self.size
class LpMetadataTableDescriptor(LpMetadataBase):
"""
Offset 0: Location of the table, relative to end of the metadata header.
Offset 4: Number of entries in the table.
Offset 8: Size of each entry in the table, in bytes.
"""
_fmt = '<3I'
def __init__(self, buffer):
(
self.offset,
self.num_entries,
self.entry_size
) = struct.unpack(self._fmt, buffer[:struct.calcsize(self._fmt)])
class LpMetadataPartition(LpMetadataBase):
"""
Offset 0: Name of this partition in ASCII characters. Any unused characters in
the buffer must be set to 0. Characters may only be alphanumeric or _.
The name must include at least one ASCII character, and it must be unique
across all partition names. The length (36) is the same as the maximum
length of a GPT partition name.
Offset 36: Attributes for the partition (see LP_PARTITION_ATTR_* flags above).
Offset 40: Index of the first extent owned by this partition. The extent will
start at logical sector 0. Gaps between extents are not allowed.
Offset 44: Number of extents in the partition. Every partition must have at least one extent.
Offset 48: Group this partition belongs to.
"""
_fmt = '<36s4I'
def __init__(self, buffer):
(
self.name,
self.attributes,
self.first_extent_index,
self.num_extents,
self.group_index
) = struct.unpack(self._fmt, buffer[0:struct.calcsize(self._fmt)])
self.name = self.name.decode("utf-8").strip('\x00')
@property
def filename(self) -> str:
return f'{self.name}.img'
class LpMetadataExtent(LpMetadataBase):
"""
Offset 0: Length of this extent, in 512-byte sectors.
Offset 8: Target type for device-mapper (see LP_TARGET_TYPE_* values).
Offset 12: Contents depends on target_type. LINEAR: The sector on the physical partition that this extent maps onto.
ZERO: This field must be 0.
Offset 20: Contents depends on target_type. LINEAR: Must be an index into the block devices table.
"""
_fmt = '<QIQI'
def __init__(self, buffer):
(
self.num_sectors,
self.target_type,
self.target_data,
self.target_source
) = struct.unpack(self._fmt, buffer[0:struct.calcsize(self._fmt)])
class LpMetadataHeader(LpMetadataBase):
"""
+-----------------------------------------+
| Header data - fixed size |
+-----------------------------------------+
| Partition table - variable size |
+-----------------------------------------+
| Partition table extents - variable size |
+-----------------------------------------+
Offset 0: Four bytes equal to `LP_METADATA_HEADER_MAGIC`
Offset 4: Version number required to read this metadata. If the version is not
equal to the library version, the metadata should be considered incompatible.
Offset 6: Minor version. A library supporting newer features should be able to
read metadata with an older minor version. However, an older library
should not support reading metadata if its minor version is higher.
Offset 8: The size of this header struct.
Offset 12: SHA256 checksum of the header, up to |header_size| bytes, computed as if this field were set to 0.
Offset 44: The total size of all tables. This size is contiguous; tables may not
have gaps in between, and they immediately follow the header.
Offset 48: SHA256 checksum of all table contents.
Offset 80: Partition table descriptor.
Offset 92: Extent table descriptor.
Offset 104: Updateable group descriptor.
Offset 116: Block device table.
Offset 128: Header flags are independent of the version number and intended to be informational only.
New flags can be added without bumping the version.
Offset 132: Reserved (zero), pad to 256 bytes.
"""
_fmt = '<I2hI32sI32s'
partitions: LpMetadataTableDescriptor = field(default=None)
extents: LpMetadataTableDescriptor = field(default=None)
groups: LpMetadataTableDescriptor = field(default=None)
block_devices: LpMetadataTableDescriptor = field(default=None)
def __init__(self, buffer):
(
self.magic,
self.major_version,
self.minor_version,
self.header_size,
self.header_checksum,
self.tables_size,
self.tables_checksum
) = struct.unpack(self._fmt, buffer[0:struct.calcsize(self._fmt)])
self.flags = 0
# self.size
class LpMetadataPartitionGroup(LpMetadataBase):
"""
Offset 0: Name of this group. Any unused characters must be 0.
Offset 36: Flags (see LP_GROUP_*).
Offset 40: Maximum size in bytes. If 0, the group has no maximum size.
"""
_fmt = '<36sIQ'
def __init__(self, buffer):
(
self.name,
self.flags,
self.maximum_size
) = struct.unpack(self._fmt, buffer[0:struct.calcsize(self._fmt)])
self.name = self.name.decode("utf-8").strip('\x00')
class LpMetadataBlockDevice(LpMetadataBase):
"""
Offset 0: First usable sector for allocating logical partitions. this will be
the first sector after the initial geometry blocks, followed by the
space consumed by metadata_max_size*metadata_slot_count*2.
Offset 8: Alignment for defining partitions or partition extents. For example,
an alignment of 1MiB will require that all partitions have a size evenly
divisible by 1MiB, and that the smallest unit the partition can grow by is 1MiB.
Alignment is normally determined at runtime when growing or adding
partitions. If for some reason the alignment cannot be determined, then
this predefined alignment in the geometry is used instead. By default, it is set to 1MiB.
Offset 12: Alignment offset for "stacked" devices. For example, if the "super"
partition itself is not aligned within the parent block device's
partition table, then we adjust for this in deciding where to place
|first_logical_sector|.
Similar to |alignment|, this will be derived from the operating system.
If it cannot be determined, it is assumed to be 0.
Offset 16: Block device size, as specified when the metadata was created.
This can be used to verify the geometry against a target device.
Offset 24: Partition name in the GPT. Any unused characters must be 0.
Offset 60: Flags (see LP_BLOCK_DEVICE_* flags below).
"""
_fmt = '<Q2IQ36sI'
def __init__(self, buffer):
(
self.first_logical_sector,
self.alignment,
self.alignment_offset,
self.block_device_size,
self.partition_name,
self.flags
) = struct.unpack(self._fmt, buffer[0:struct.calcsize(self._fmt)])
self.partition_name = self.partition_name.decode("utf-8").strip('\x00')
@dataclass
class Metadata:
header: LpMetadataHeader = field(default=None)
geometry: LpMetadataGeometry = field(default=None)
partitions: List[LpMetadataPartition] = field(default_factory=list)
extents: List[LpMetadataExtent] = field(default_factory=list)
groups: List[LpMetadataPartitionGroup] = field(default_factory=list)
block_devices: List[LpMetadataBlockDevice] = field(default_factory=list)
@property
def info(self) -> Dict:
return self._get_info()
@property
def metadata_region(self) -> int:
if self.geometry is None:
return 0
return LP_PARTITION_RESERVED_BYTES + (
LP_METADATA_GEOMETRY_SIZE + self.geometry.metadata_max_size * self.geometry.metadata_slot_count
) * 2
def _get_extents_string(self, partition: LpMetadataPartition) -> List[str]:
result = []
first_sector = 0
for extent_number in range(partition.num_extents):
index = partition.first_extent_index + extent_number
extent = self.extents[index]
_base = f"{first_sector} .. {first_sector + extent.num_sectors - 1}"
first_sector += extent.num_sectors
if extent.target_type == LP_TARGET_TYPE_LINEAR:
result.append(
f"{_base} linear {self.block_devices[extent.target_source].partition_name} {extent.target_data}"
)
elif extent.target_type == LP_TARGET_TYPE_ZERO:
result.append(f"{_base} zero")
return result
def _get_partition_layout(self) -> List[str]:
result = []
for partition in self.partitions:
for extent_number in range(partition.num_extents):
index = partition.first_extent_index + extent_number
extent = self.extents[index]
block_device_name = ""
if extent.target_type == LP_TARGET_TYPE_LINEAR:
block_device_name = self.block_devices[extent.target_source].partition_name
result.append(
f"{block_device_name}: {extent.target_data} .. {extent.target_data + extent.num_sectors}: "
f"{partition.name} ({extent.num_sectors} sectors)"
)
return result
def get_offsets(self, slot_number: int = 0) -> List[int]:
base = LP_PARTITION_RESERVED_BYTES + (LP_METADATA_GEOMETRY_SIZE * 2)
_tmp_offset = self.geometry.metadata_max_size * slot_number
primary_offset = base + _tmp_offset
backup_offset = base + self.geometry.metadata_max_size * self.geometry.metadata_slot_count + _tmp_offset
return [primary_offset, backup_offset]
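    # Worked example (hedged): with the common geometry of two metadata slots
    # and metadata_max_size = 65536, get_offsets(0) returns
    #   primary = 4096 + 2*4096   = 12288
    #   backup  = 12288 + 2*65536 = 143360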
def _get_info(self) -> Dict:
# TODO 25.01.2023: Liblp version 1.2 build_header_flag_string check header version 1.2
result = {}
try:
result = {
"metadata_version": f"{self.header.major_version}.{self.header.minor_version}",
"metadata_size": self.header.header_size + self.header.tables_size,
"metadata_max_size": self.geometry.metadata_max_size,
"metadata_slot_count": self.geometry.metadata_slot_count,
"header_flags": "none",
"block_devices": [
{
"name": item.partition_name,
"first_sector": item.first_logical_sector,
"size": item.block_device_size,
"block_size": self.geometry.logical_block_size,
"flags": build_block_device_flag_string(item.flags),
"alignment": item.alignment,
"alignment_offset": item.alignment_offset
} for item in self.block_devices
],
"group_table": [
{
"name": self.groups[index].name,
"maximum_size": self.groups[index].maximum_size,
"flags": build_group_flag_string(self.groups[index].flags)
} for index in range(0, self.header.groups.num_entries)
],
"partition_table": [
{
"name": item.name,
"group_name": self.groups[item.group_index].name,
"is_dynamic": True,
"size": self.extents[item.first_extent_index].num_sectors * LP_SECTOR_SIZE,
"attributes": build_attribute_string(item.attributes),
"extents": self._get_extents_string(item)
} for item in self.partitions
],
"partition_layout": self._get_partition_layout()
}
except Exception:
pass
finally:
return result
def to_json(self) -> str:
data = self._get_info()
if not data:
return ""
return json.dumps(
data,
indent=1,
cls=ShowJsonInfo,
ignore_keys=[
'metadata_version', 'metadata_size', 'metadata_max_size', 'metadata_slot_count', 'header_flags',
'partition_layout',
'attributes', 'extents', 'flags', 'first_sector'
])
def __str__(self):
data = self._get_info()
if not data:
return ""
template = Template(PLAIN_TEXT_TEMPLATE)
layouts = "\n".join(data["partition_layout"])
partitions = "------------------------\n".join(
[
" Name: {}\n Group: {}\n Attributes: {}\n Extents:\n {}\n".format(
item["name"],
item["group_name"],
item["attributes"],
"\n".join(item["extents"])
) for item in data["partition_table"]
]
)[:-1]
blocks = "\n".join(
[
" Partition name: {}\n First sector: {}\n Size: {} bytes\n Flags: {}".format(
item["name"],
item["first_sector"],
item["size"],
item["flags"]
)
for item in data["block_devices"]
]
)
groups = "------------------------\n".join(
[
" Name: {}\n Maximum size: {} bytes\n Flags: {}\n".format(
item["name"],
item["maximum_size"],
item["flags"]
) for item in data["group_table"]
]
)[:-1]
return template.substitute(partitions=partitions, layouts=layouts, blocks=blocks, groups=groups, **data)
class LpUnpackError(Exception):
"""Raised any error unpacking"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
@dataclass
class UnpackJob:
name: str
geometry: LpMetadataGeometry
parts: List[Tuple[int, int]] = field(default_factory=list)
total_size: int = field(default=0)
class SparseImage:
def __init__(self, fd):
self._fd = fd
self.header = None
def check(self):
self._fd.seek(0)
self.header = SparseHeader(self._fd.read(SPARSE_HEADER_SIZE))
        return self.header.magic == SPARSE_HEADER_MAGIC
def _read_data(self, chunk_data_size: int):
if self.header.chunk_hdr_sz > SPARSE_CHUNK_HEADER_SIZE:
self._fd.seek(self.header.chunk_hdr_sz - SPARSE_CHUNK_HEADER_SIZE, 1)
return self._fd.read(chunk_data_size)
def unsparse(self):
if not self.header:
self._fd.seek(0)
self.header = SparseHeader(self._fd.read(SPARSE_HEADER_SIZE))
chunks = self.header.total_chunks
self._fd.seek(self.header.file_hdr_sz - SPARSE_HEADER_SIZE, 1)
unsparse_file_dir = os.path.dirname(self._fd.name)
unsparse_file = os.path.join(unsparse_file_dir,
"{}.unsparse.img".format(os.path.splitext(os.path.basename(self._fd.name))[0]))
with open(str(unsparse_file), 'wb') as out:
sector_base = 82528
output_len = 0
while chunks > 0:
chunk_header = SparseChunkHeader(self._fd.read(SPARSE_CHUNK_HEADER_SIZE))
sector_size = (chunk_header.chunk_sz * self.header.blk_sz) >> 9
chunk_data_size = chunk_header.total_sz - self.header.chunk_hdr_sz
                if chunk_header.chunk_type == 0xCAC1:  # raw: copy through
                    data = self._read_data(chunk_data_size)
                    len_data = len(data)
                    if len_data == (sector_size << 9):
                        out.write(data)
                        output_len += len_data
                        sector_base += sector_size
                elif chunk_header.chunk_type == 0xCAC2:  # fill: 4 bytes of pattern
                    data = self._read_data(chunk_data_size)
                    len_data = sector_size << 9
                    # Replicate the fill pattern instead of assuming zeros.
                    fill = data[:4] if len(data) >= 4 else b'\x00' * 4
                    out.write(fill * (len_data // 4))
                    output_len += len_data
                    sector_base += sector_size
                elif chunk_header.chunk_type == 0xCAC3:  # don't care: emit zeros
                    data = self._read_data(chunk_data_size)
                    len_data = sector_size << 9
                    out.write(struct.pack("B", 0) * len_data)
                    output_len += len(data)
                    sector_base += sector_size
                else:
                    len_data = sector_size << 9
                    out.write(struct.pack("B", 0) * len_data)
                    sector_base += sector_size
                chunks -= 1
return unsparse_file
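# Hedged usage sketch for SparseImage ('super.img' is a hypothetical path):
#
#   with open('super.img', 'rb') as fd:
#       img = SparseImage(fd)
#       raw_path = img.unsparse() if img.check() else fd.name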
T = TypeVar('T')
class LpUnpack(object):
def __init__(self, **kwargs):
self._partition_name = kwargs.get('NAME')
self._show_info = kwargs.get('SHOW_INFO', True)
self._show_info_format = kwargs.get('SHOW_INFO_FORMAT', FormatType.TEXT)
self._config = kwargs.get('CONFIG', None)
self._slot_num = None
self._fd: BinaryIO = open(kwargs.get('SUPER_IMAGE'), 'rb')
self._out_dir = kwargs.get('OUTPUT_DIR', None)
def _check_out_dir_exists(self):
if self._out_dir is None:
return
if not os.path.exists(self._out_dir):
os.makedirs(self._out_dir, exist_ok=True)
def _extract_partition(self, unpack_job: UnpackJob):
self._check_out_dir_exists()
start = dti()
print(f'Extracting partition [{unpack_job.name}]')
out_file = os.path.join(self._out_dir, f'{unpack_job.name}.img')
with open(str(out_file), 'wb') as out:
for part in unpack_job.parts:
offset, size = part
self._write_extent_to_file(out, offset, size, unpack_job.geometry.logical_block_size)
print('Done:[%s]' % (dti() - start))
def _extract(self, partition, metadata):
unpack_job = UnpackJob(name=partition.name, geometry=metadata.geometry)
if partition.num_extents != 0:
for extent_number in range(partition.num_extents):
index = partition.first_extent_index + extent_number
extent = metadata.extents[index]
if extent.target_type != LP_TARGET_TYPE_LINEAR:
raise LpUnpackError(f'Unsupported target type in extent: {extent.target_type}')
offset = extent.target_data * LP_SECTOR_SIZE
size = extent.num_sectors * LP_SECTOR_SIZE
unpack_job.parts.append((offset, size))
unpack_job.total_size += size
self._extract_partition(unpack_job)
def _get_data(self, count: int, size: int, clazz: T) -> List[T]:
result = []
while count > 0:
result.append(clazz(self._fd.read(size)))
count -= 1
return result
def _read_chunk(self, block_size):
while True:
data = self._fd.read(block_size)
if not data:
break
yield data
def _read_metadata_header(self, metadata: Metadata):
offsets = metadata.get_offsets()
for index, offset in enumerate(offsets):
self._fd.seek(offset, io.SEEK_SET)
header = LpMetadataHeader(self._fd.read(80))
header.partitions = LpMetadataTableDescriptor(self._fd.read(12))
header.extents = LpMetadataTableDescriptor(self._fd.read(12))
header.groups = LpMetadataTableDescriptor(self._fd.read(12))
header.block_devices = LpMetadataTableDescriptor(self._fd.read(12))
            if header.magic != LP_METADATA_HEADER_MAGIC:
                check_index = index + 1
                if check_index >= len(offsets):
                    raise LpUnpackError('Logical partition metadata has invalid magic value.')
                else:
                    print(f'Reading backup header at offset 0x{offsets[check_index]:x}')
                    continue
metadata.header = header
self._fd.seek(offset + header.header_size, io.SEEK_SET)
def _read_metadata(self):
self._fd.seek(LP_PARTITION_RESERVED_BYTES, io.SEEK_SET)
metadata = Metadata(geometry=self._read_primary_geometry())
if metadata.geometry.magic != LP_METADATA_GEOMETRY_MAGIC:
raise LpUnpackError('Logical partition metadata has invalid geometry magic signature.')
if metadata.geometry.metadata_slot_count == 0:
raise LpUnpackError('Logical partition metadata has invalid slot count.')
if metadata.geometry.metadata_max_size % LP_SECTOR_SIZE != 0:
raise LpUnpackError('Metadata max size is not sector-aligned.')
self._read_metadata_header(metadata)
metadata.partitions = self._get_data(
metadata.header.partitions.num_entries,
metadata.header.partitions.entry_size,
LpMetadataPartition
)
metadata.extents = self._get_data(
metadata.header.extents.num_entries,
metadata.header.extents.entry_size,
LpMetadataExtent
)
metadata.groups = self._get_data(
metadata.header.groups.num_entries,
metadata.header.groups.entry_size,
LpMetadataPartitionGroup
)
metadata.block_devices = self._get_data(
metadata.header.block_devices.num_entries,
metadata.header.block_devices.entry_size,
LpMetadataBlockDevice
)
try:
super_device: LpMetadataBlockDevice = cast(LpMetadataBlockDevice, iter(metadata.block_devices).__next__())
if metadata.metadata_region > super_device.first_logical_sector * LP_SECTOR_SIZE:
raise LpUnpackError('Logical partition metadata overlaps with logical partition contents.')
except StopIteration:
raise LpUnpackError('Metadata does not specify a super device.')
return metadata
    def _read_primary_geometry(self) -> LpMetadataGeometry:
        geometry = LpMetadataGeometry(self._fd.read(LP_METADATA_GEOMETRY_SIZE))
        if geometry.magic == LP_METADATA_GEOMETRY_MAGIC:
            return geometry
        # Primary geometry is corrupt; fall back to the backup copy stored right after it.
        return LpMetadataGeometry(self._fd.read(LP_METADATA_GEOMETRY_SIZE))
def _write_extent_to_file(self, fd: IO, offset: int, size: int, block_size: int):
self._fd.seek(offset)
for block in self._read_chunk(block_size):
if size == 0:
break
fd.write(block)
size -= block_size
def unpack(self):
try:
            sparse = SparseImage(self._fd)
            if sparse.check():
                print('Sparse image detected.')
                print('Converting to a non-sparse image...')
                unsparse_file = sparse.unsparse()
                self._fd.close()
                self._fd = open(str(unsparse_file), 'rb')
                print('Result: [ok]')
self._fd.seek(0)
metadata = self._read_metadata()
if self._partition_name:
filter_partition = []
for index, partition in enumerate(metadata.partitions):
if partition.name in self._partition_name:
filter_partition.append(partition)
if not filter_partition:
raise LpUnpackError(f'Could not find partition: {self._partition_name}')
metadata.partitions = filter_partition
if self._slot_num:
if self._slot_num > metadata.geometry.metadata_slot_count:
raise LpUnpackError(f'Invalid metadata slot number: {self._slot_num}')
if self._show_info:
if self._show_info_format == FormatType.TEXT:
print(metadata)
elif self._show_info_format == FormatType.JSON:
print(f"{metadata.to_json()}\n")
if not self._show_info and self._out_dir is None:
                raise LpUnpackError('No output directory specified for extraction')
if self._out_dir:
for partition in metadata.partitions:
self._extract(partition, metadata)
except LpUnpackError as e:
print(e.message)
sys.exit(1)
finally:
self._fd.close()
def create_parser():
_parser = argparse.ArgumentParser(
description=f'{os.path.basename(sys.argv[0])} - command-line tool for extracting partition images from super'
)
_parser.add_argument(
'-p',
'--partition',
dest='NAME',
        type=lambda x: re.split(r"[,:]", x),  # split on the documented delimiters
help='Extract the named partition. This can be specified multiple times or through the delimiter ["," ":"]'
)
_parser.add_argument(
'-S',
'--slot',
dest='NUM',
type=int,
help=' !!! No implementation yet !!! Slot number (default is 0).'
)
if sys.version_info >= (3, 9):
_parser.add_argument(
'--info',
dest='SHOW_INFO',
default=False,
action=argparse.BooleanOptionalAction,
help='Displays pretty-printed partition metadata'
)
else:
_parser.add_argument(
'--info',
dest='SHOW_INFO',
action='store_true',
help='Displays pretty-printed partition metadata'
)
_parser.add_argument(
'--no-info',
dest='SHOW_INFO',
action='store_false'
)
_parser.set_defaults(SHOW_INFO=False)
_parser.add_argument(
'-f',
'--format',
dest='SHOW_INFO_FORMAT',
type=FormatType,
action=EnumAction,
default=FormatType.TEXT,
        help='Choose the format for printing info'
)
_parser.add_argument('SUPER_IMAGE')
_parser.add_argument(
'OUTPUT_DIR',
type=str,
nargs='?',
)
return _parser
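# Example invocations (hedged; they follow the flags defined in create_parser):
#   python lpunpack.py --info super.img
#   python lpunpack.py -p system,vendor super.img out/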
def unpack(file: str, out: str):
_parser = argparse.ArgumentParser()
_parser.add_argument('--SUPER_IMAGE', default=file)
_parser.add_argument('--OUTPUT_DIR', default=out)
_parser.add_argument('--SHOW_INFO', default=False)
namespace = _parser.parse_args()
if not os.path.exists(namespace.SUPER_IMAGE):
        raise FileNotFoundError("Cannot find %s" % namespace.SUPER_IMAGE)
else:
LpUnpack(**vars(namespace)).unpack()
def main():
parser = create_parser()
namespace = parser.parse_args()
if len(sys.argv) >= 2:
if not os.path.exists(namespace.SUPER_IMAGE):
return 2
LpUnpack(**vars(namespace)).unpack()
else:
return 1
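# Hedged entry point (an assumption -- the file does not define one): main()
# returns 1/2 on bad usage, so mirror that in the process exit status.
if __name__ == '__main__':
    sys.exit(main())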

179
ofp_mtk_decrypt.py Normal file
View File

@ -0,0 +1,179 @@
#!/usr/bin/env python3
# Oppo OFP MTK Decrypter (c) B. Kerler 2022
# Licensed under MIT License
import os
import sys
import hashlib
from Crypto.Cipher import AES
from struct import unpack
from binascii import unhexlify, hexlify
def swap(ch):
return ((ch & 0xF) << 4) + ((ch & 0xF0) >> 4)
def keyshuffle(key, hkey):
for i in range(0, 0x10, 4):
key[i] = swap((hkey[i] ^ key[i]))
key[i + 1] = swap(hkey[i + 1] ^ key[i + 1])
key[i + 2] = swap(hkey[i + 2] ^ key[i + 2])
key[i + 3] = swap(hkey[i + 3] ^ key[i + 3])
return key
def mtk_shuffle(key, keylength, input, inputlength):
for i in range(0, inputlength):
k = key[(i % keylength)]
h = ((((input[i]) & 0xF0) >> 4) | (16 * ((input[i]) & 0xF)))
input[i] = k ^ h
return input
def mtk_shuffle2(key, keylength, input, inputlength):
for i in range(0, inputlength):
tmp = key[i % keylength] ^ input[i]
input[i] = ((tmp & 0xF0) >> 4) | (16 * (tmp & 0xF))
return input
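# Worked example (hedged): both shuffles combine a repeating-key XOR with an
# 8-bit nibble swap. For key byte 0x12 and input byte 0x34, mtk_shuffle2
# computes 0x12 ^ 0x34 = 0x26, then swaps nibbles -> 0x62.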
def aes_cfb(key, iv, data, decrypt=True, segment_size=128):
cipher = AES.new(key, AES.MODE_CFB, IV=iv, segment_size=segment_size)
if decrypt:
plaintext = cipher.decrypt(data)
return plaintext
else:
ciphertext = cipher.encrypt(data)
return ciphertext
keytables = [
["67657963787565E837D226B69A495D21", # A77 CPH1715EX_11_A.04_170426, F1S A1601_MT6750_EX_11_A.15_160913 FW
"F6C50203515A2CE7D8C3E1F938B7E94C",
"42F2D5399137E2B2813CD8ECDF2F4D72"],
["9E4F32639D21357D37D226B69A495D21", # A77 CPH1715EX_11_A.04_170426, F1S A1601_MT6750_EX_11_A.15_160913 CDT
"A3D8D358E42F5A9E931DD3917D9A3218",
"386935399137416B67416BECF22F519A"],
["892D57E92A4D8A975E3C216B7C9DE189",
"D26DF2D9913785B145D18C7219B89F26",
"516989E4A1BFC78B365C6BC57D944391"],
["27827963787265EF89D126B69A495A21",
"82C50203285A2CE7D8C3E198383CE94C",
"422DD5399181E223813CD8ECDF2E4D72"],
["3C4A618D9BF2E4279DC758CD535147C3",
"87B13D29709AC1BF2382276C4E8DF232",
"59B7A8E967265E9BCABE2469FE4A915E"],
["1C3288822BF824259DC852C1733127D3", # A83_CPH1827_11_A.21_2G_180923 FW, Realme 3 RMX1827EX_11_C.13_200624_1264686e
"E7918D22799181CF2312176C9E2DF298",
"3247F889A7B6DECBCA3E28693E4AAAFE"],
["1E4F32239D65A57D37D2266D9A775D43",
"A332D3C3E42F5A3E931DD991729A321D",
"3F2A35399A373377674155ECF28FD19A"],
["122D57E92A518AFF5E3C786B7C34E189",
"DD6DF2D9543785674522717219989FB0",
"12698965A132C76136CC88C5DD94EE91"],
[
"ab3f76d7989207f2", # AES KEY
"2bf515b3a9737835" # AES IV
]
]
def getkey(index):
kt = keytables[index]
if len(kt) == 3:
obskey = bytearray(unhexlify(kt[0]))
encaeskey = bytearray(unhexlify(kt[1]))
encaesiv = bytearray(unhexlify(kt[2]))
aeskey = hexlify(hashlib.md5(mtk_shuffle2(obskey, 16, encaeskey, 16)).digest())[:16]
aesiv = hexlify(hashlib.md5(mtk_shuffle2(obskey, 16, encaesiv, 16)).digest())[:16]
else:
aeskey = bytes(kt[0], 'utf-8')
aesiv = bytes(kt[1], 'utf-8')
print(aeskey, aesiv)
return aeskey, aesiv
def brutekey(rf):
rf.seek(0)
encdata = rf.read(16)
for keyid in range(0, len(keytables)):
aeskey, aesiv = getkey(keyid)
data = aes_cfb(aeskey, aesiv, encdata, True)
if data[:3] == b"MMM":
return aeskey, aesiv
print("Unknown key. Please ask the author for support :)")
exit(0)
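# Hedged usage: brutekey() tries every key table against the first 16 bytes of
# the image and keeps the key whose decryption starts with b"MMM":
#
#   with open('firmware.ofp', 'rb') as rf:  # path is hypothetical
#       aeskey, aesiv = brutekey(rf)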
def cleancstring(input):
return input.replace(b"\x00", b"").decode('utf-8')
def main(filename, outdir):
if not os.path.exists(outdir):
os.mkdir(outdir)
hdrkey = bytearray(b"geyixue")
filesize = os.stat(filename).st_size
hdrlength = 0x6C
with open(filename, 'rb') as rf:
aeskey, aesiv = brutekey(rf)
rf.seek(filesize - hdrlength)
hdr = mtk_shuffle(hdrkey, len(hdrkey), bytearray(rf.read(hdrlength)), hdrlength)
prjname, unknownval, reserved, cpu, flashtype, hdr2entries, prjinfo, crc = unpack("46s Q 4s 7s 5s H 32s H", hdr)
hdr2length = hdr2entries * 0x60
prjname = cleancstring(prjname)
prjinfo = cleancstring(prjinfo)
cpu = cleancstring(cpu)
flashtype = cleancstring(flashtype)
if prjname != "": print(f"Detected prjname:{prjname}")
if prjinfo != "": print(f"Detected prjinfo:{prjinfo}")
if cpu != "": print(f"Detected cpu:{cpu}")
if flashtype != "": print(f"Detected flash:{flashtype}")
rf.seek(filesize - hdr2length - hdrlength)
hdr2 = mtk_shuffle(hdrkey, len(hdrkey), bytearray(rf.read(hdr2length)), hdr2length)
for i in range(0, len(hdr2) // 0x60):
name, start, length, enclength, filename, crc = unpack("<32s Q Q Q 32s Q", hdr2[i * 0x60:(i * 0x60) + 0x60])
name = name.replace(b"\x00", b"").decode('utf-8')
filename = filename.replace(b"\x00", b"").decode('utf-8')
print(f"Writing \"{name}\" as \"{outdir}/{filename}\"...")
with open(os.path.join(outdir, filename), 'wb') as wb:
if enclength > 0:
rf.seek(start)
encdata = rf.read(enclength)
if enclength % 16 != 0:
encdata += b"\x00" * (16 - (enclength % 16))
data = aes_cfb(aeskey, aesiv, encdata, True)
wb.write(data[:enclength])
length -= enclength
while length > 0:
size = 0x200000
if length < size:
size = length
data = rf.read(size)
length -= size
wb.write(data)
print(f"Files successfully decrypted to subdirectory {outdir}")
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Oppo MTK OFP decrypt tool 1.1 (c) B.Kerler 2020-2022\n")
print("Usage: %s <filename> <directory to extract>" % __file__)
sys.exit(1)
filename = sys.argv[1]
outdir = sys.argv[2]
main(filename, outdir)

365
ofp_qc_decrypt.py Normal file
View File

@ -0,0 +1,365 @@
#!/usr/bin/env python3
# (c) B.Kerler 2018-2021, MIT license
import os
import sys
import xml.etree.ElementTree as ET
import zipfile
from struct import unpack
from binascii import unhexlify, hexlify
from Crypto.Cipher import AES
from Crypto.Hash import MD5
import hashlib
import shutil
def swap(ch):
return ((ch & 0xF) << 4) + ((ch & 0xF0) >> 4)
def keyshuffle(key, hkey):
for i in range(0, 0x10, 4):
key[i] = swap((hkey[i] ^ key[i]))
key[i + 1] = swap(hkey[i + 1] ^ key[i + 1])
key[i + 2] = swap(hkey[i + 2] ^ key[i + 2])
key[i + 3] = swap(hkey[i + 3] ^ key[i + 3])
return key
def ROR(x, n, bits=32):
mask = (2 ** n) - 1
mask_bits = x & mask
return (x >> n) | (mask_bits << (bits - n))
def ROL(x, n, bits=32):
return ROR(x, bits - n, bits)
def generatekey1():
key1 = "42F2D5399137E2B2813CD8ECDF2F4D72"
key2 = "F6C50203515A2CE7D8C3E1F938B7E94C"
key3 = "67657963787565E837D226B69A495D21"
key1 = bytearray.fromhex(key1)
key2 = bytearray.fromhex(key2)
key3 = bytearray.fromhex(key3)
key2 = keyshuffle(key2, key3)
aeskey = bytes(hashlib.md5(key2).hexdigest()[0:16], 'utf-8')
key1 = keyshuffle(key1, key3)
iv = bytes(hashlib.md5(key1).hexdigest()[0:16], 'utf-8')
return aeskey, iv
def bytestolow(data):
h = MD5.new()
h.update(data)
shash = h.digest()
return hexlify(shash).lower()[0:16]
def deobfuscate(data, mask):
ret = bytearray()
for i in range(0, len(data)):
v = ROL((data[i] ^ mask[i]), 4, 8)
ret.append(v)
return ret
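# Worked example (hedged): deobfuscate() XORs each byte with the mask, then
# rotates left by 4 in 8 bits (a nibble swap): 0x34 ^ 0x12 = 0x26 -> 0x62.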
def generatekey2(filename):
keys = [
# R9s/A57t
["V1.4.17/1.4.27",
"27827963787265EF89D126B69A495A21",
"82C50203285A2CE7D8C3E198383CE94C",
"422DD5399181E223813CD8ECDF2E4D72"],
# a3s
["V1.6.17",
"E11AA7BB558A436A8375FD15DDD4651F",
"77DDF6A0696841F6B74782C097835169",
"A739742384A44E8BA45207AD5C3700EA"],
["V1.5.13",
"67657963787565E837D226B69A495D21",
"F6C50203515A2CE7D8C3E1F938B7E94C",
"42F2D5399137E2B2813CD8ECDF2F4D72"],
# R15 Pro CPH1831 V1.6.6 / FindX CPH1871 V1.6.9 / R17 Pro CPH1877 V1.6.17 / R17 PBEM00 V1.6.17 / A5 2020 V1.7.6 / K3 CPH1955 V1.6.26 UFS
# Reno 5G CPH1921 V1.6.26 / Realme 3 Pro RMX1851 V1.6.17 / Reno 10X Zoom V1.6.26 / R17 CPH1879 V1.6.17 / R17 Neo CPH1893 / K1 PBCM30
["V1.6.6/1.6.9/1.6.17/1.6.24/1.6.26/1.7.6",
"3C2D518D9BF2E4279DC758CD535147C3",
"87C74A29709AC1BF2382276C4E8DF232",
"598D92E967265E9BCABE2469FE4A915E"],
# RM1921EX V1.7.2, Realme X RMX1901 V1.7.2, Realme 5 Pro RMX1971 V1.7.2, Realme 5 RMX1911 V1.7.2
["V1.7.2",
"8FB8FB261930260BE945B841AEFA9FD4",
"E529E82B28F5A2F8831D860AE39E425D",
"8A09DA60ED36F125D64709973372C1CF"],
# OW19W8AP_11_A.23_200715
["V2.0.3",
"E8AE288C0192C54BF10C5707E9C4705B",
"D64FC385DCD52A3C9B5FBA8650F92EDA",
"79051FD8D8B6297E2E4559E997F63B7F"]
]
    for dkey in keys:
# "Read metadata failed"
mc = bytearray.fromhex(dkey[1])
userkey = bytearray.fromhex(dkey[2])
ivec = bytearray.fromhex(dkey[3])
# userkey=bytearray(unhexlify("A3D8D358E42F5A9E931DD3917D9A3218"))
# ivec=bytearray(unhexlify("386935399137416B67416BECF22F519A"))
# mc=bytearray(unhexlify("9E4F32639D21357D37D226B69A495D21"))
key = deobfuscate(userkey, mc)
iv = deobfuscate(ivec, mc)
key = bytestolow(key)
iv = bytestolow(iv)
pagesize, data = extract_xml(filename, key, iv)
if pagesize != 0:
return pagesize, key, iv, data
return 0, None, None, None
def extract_xml(filename, key, iv):
filesize = os.stat(filename).st_size
with open(filename, 'rb') as rf:
pagesize = 0
for x in [0x200, 0x1000]:
rf.seek(filesize - x + 0x10)
if unpack("<I", rf.read(4))[0] == 0x7CEF:
pagesize = x
break
if pagesize == 0:
print("Unknown pagesize. Aborting")
exit(0)
xmloffset = filesize - pagesize
rf.seek(xmloffset + 0x14)
offset = unpack("<I", rf.read(4))[0] * pagesize
length = unpack("<I", rf.read(4))[0]
if length < 200: # A57 hack
length = xmloffset - offset - 0x57
rf.seek(offset)
data = rf.read(length)
dec = aes_cfb(data, key, iv)
# h=MD5.new()
# h.update(data)
# print(dec.decode('utf-8'))
# print(h.hexdigest())
# print("Done.")
if b"<?xml" in dec:
return pagesize, dec
else:
return 0, ""
def aes_cfb(data, key, iv):
ctx = AES.new(key, AES.MODE_CFB, iv=iv, segment_size=128)
decrypted = ctx.decrypt(data)
return decrypted
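# Hedged round-trip check for the CFB wrapper above (PyCryptodome):
#
#   k = iv = b'0' * 16
#   ct = AES.new(k, AES.MODE_CFB, iv=iv, segment_size=128).encrypt(b'test')
#   assert aes_cfb(ct, k, iv) == b'test'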
def copysub(rf, wf, start, length):
rf.seek(start)
rlen = 0
while length > 0:
if length < 0x100000:
size = length
else:
size = 0x100000
data = rf.read(size)
wf.write(data)
rlen += len(data)
length -= size
return rlen
def copy(filename, wfilename, path, start, length, checksums):
print(f"\nExtracting {wfilename}")
with open(filename, 'rb') as rf:
with open(os.path.join(path, wfilename), 'wb') as wf:
rf.seek(start)
data = rf.read(length)
wf.write(data)
checkhashfile(os.path.join(path, wfilename), checksums, True)
def decryptfile(key, iv, filename, path, wfilename, start, length, rlength, checksums, decryptsize=0x40000):
print(f"\nExtracting {wfilename}")
if rlength == length:
tlen = length
length = (length // 0x4 * 0x4)
if tlen % 0x4 != 0:
length += 0x4
with open(filename, 'rb') as rf:
with open(os.path.join(path, wfilename), 'wb') as wf:
rf.seek(start)
size = decryptsize
if rlength < decryptsize:
size = rlength
data = rf.read(size)
if size % 4:
data += (4 - (size % 4)) * b'\x00'
outp = aes_cfb(data, key, iv)
wf.write(outp[:size])
if rlength > decryptsize:
copysub(rf, wf, start + size, rlength - size)
if rlength % 0x1000 != 0:
fill = bytearray([0x00 for i in range(0x1000 - (rlength % 0x1000))])
# wf.write(fill)
checkhashfile(os.path.join(path, wfilename), checksums, False)
def checkhashfile(wfilename, checksums, iscopy):
sha256sum = checksums[0]
md5sum = checksums[1]
if iscopy:
prefix = "Copy: "
else:
prefix = "Decrypt: "
with open(wfilename, "rb") as rf:
size = os.stat(wfilename).st_size
md5 = hashlib.md5(rf.read(0x40000))
sha256bad = False
md5bad = False
md5status = "empty"
sha256status = "empty"
if sha256sum != "":
for x in [0x40000, size]:
rf.seek(0)
sha256 = hashlib.sha256(rf.read(x))
if sha256sum != sha256.hexdigest():
sha256bad = True
sha256status = "bad"
else:
sha256status = "verified"
break
if md5sum != "":
if md5sum != md5.hexdigest():
md5bad = True
md5status = "bad"
else:
md5status = "verified"
if (sha256bad and md5bad) or (sha256bad and md5sum == "") or (md5bad and sha256sum == ""):
print(f"{prefix}error on hashes. File might be broken!")
else:
print(f"{prefix}success! (md5: {md5status} | sha256: {sha256status})")
def decryptitem(item, pagesize):
sha256sum = ""
md5sum = ""
wfilename = ""
start = -1
rlength = 0
decryptsize = 0x40000
if "Path" in item.attrib:
wfilename = item.attrib["Path"]
elif "filename" in item.attrib:
wfilename = item.attrib["filename"]
if "sha256" in item.attrib:
sha256sum = item.attrib["sha256"]
if "md5" in item.attrib:
md5sum = item.attrib["md5"]
if "FileOffsetInSrc" in item.attrib:
start = int(item.attrib["FileOffsetInSrc"]) * pagesize
elif "SizeInSectorInSrc" in item.attrib:
start = int(item.attrib["SizeInSectorInSrc"]) * pagesize
if "SizeInByteInSrc" in item.attrib:
rlength = int(item.attrib["SizeInByteInSrc"])
if "SizeInSectorInSrc" in item.attrib:
length = int(item.attrib["SizeInSectorInSrc"]) * pagesize
else:
length = rlength
return wfilename, start, length, rlength, [sha256sum, md5sum], decryptsize
def main(filename, outdir):
if not os.path.exists(outdir):
os.mkdir(outdir)
pk = False
with open(filename, "rb") as rf:
if rf.read(2) == b"PK":
pk = True
if pk:
print("Zip file detected, trying to decrypt files")
zippw = bytes("flash@realme$50E7F7D847732396F1582CD62DD385ED7ABB0897", 'utf-8')
with zipfile.ZipFile(filename) as file:
for zfile in file.namelist():
print("Extracting " + zfile + " to " + outdir)
file.extract(zfile, pwd=zippw, path=outdir)
print("Files extracted to " + outdir)
exit(0)
# key,iv=generatekey1()
pagesize, key, iv, data = generatekey2(filename)
if pagesize == 0:
print("Unknown key. Aborting")
exit(0)
else:
xml = data[:data.rfind(b">") + 1].decode('utf-8')
if "/" in filename:
path = filename[:filename.rfind("/")]
elif "\\" in filename:
path = filename[:filename.rfind("\\")]
else:
path = ""
path = os.path.join(path, outdir)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
else:
os.mkdir(path)
print("Saving ProFile.xml")
        with open(path + os.sep + "ProFile.xml", mode="w") as file_handle:
            file_handle.write(xml)
root = ET.fromstring(xml)
for child in root:
for item in child:
if "Path" not in item.attrib and "filename" not in item.attrib:
for subitem in item:
wfilename, start, length, rlength, checksums, decryptsize = decryptitem(subitem, pagesize)
if wfilename == "" or start == -1:
continue
decryptfile(key, iv, filename, path, wfilename, start, length, rlength, checksums, decryptsize)
wfilename, start, length, rlength, checksums, decryptsize = decryptitem(item, pagesize)
if wfilename == "" or start == -1:
continue
if child.tag in ["Sahara"]:
decryptsize = rlength
if child.tag in ["Config", "Provision", "ChainedTableOfDigests", "DigestsToSign", "Firmware"]:
length = rlength
if child.tag in ["DigestsToSign", "ChainedTableOfDigests", "Firmware"]:
copy(filename, wfilename, path, start, length, checksums)
else:
decryptfile(key, iv, filename, path, wfilename, start, length, rlength, checksums, decryptsize)
print("\nDone. Extracted files to " + path)
exit(0)
if __name__ == "__main__":
main()

742
opscrypto.py Normal file
View File

@ -0,0 +1,742 @@
#!/usr/bin/env python3
# Oneplus Decrypter (c) V 1.4 B.Kerler 2019-2022
# Licensed under MIT License
"""
Usage:
opscrypto.py --help
opscrypto.py encryptfile <filename>
opscrypto.py decryptfile <filename>
opscrypto.py decrypt <filename>
opscrypto.py encrypt <directory> [--projid=value] [--firmwarename=name] [--savename=out.ops] [--mbox=version]
Options:
--projid=value Set projid Example:18801
--mbox=version Set encryption key [default: 5]
--firmwarename=name Set firmware version Example:fajita_41_J.42_191214
--savename=name Set ops filename [default: out.ops]
"""
import sys
from docopt import docopt
args = docopt(__doc__, version='1.2')
import shutil
import os
from struct import pack, unpack
import xml.etree.ElementTree as ET
import hashlib
from pathlib import Path
from queue import Queue
import mmap
def mmap_io(filename, mode, length=0):
if mode == "rb":
with open(filename, mode="rb") as file_obj:
return mmap.mmap(file_obj.fileno(), length=0, access=mmap.ACCESS_READ)
elif mode == "wb":
if os.path.exists(filename):
length = os.stat(filename).st_size
else:
with open(filename, "wb") as wf:
wf.write(length * b'\0')
wf.close()
with open(filename, mode="r+b") as file_obj:
return mmap.mmap(file_obj.fileno(), length=length, access=mmap.ACCESS_WRITE)
# mmap_obj.flush() on finish
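# Hedged usage sketch for mmap_io (paths are hypothetical):
#   ro = mmap_io('in.bin', 'rb')           # read-only map of the whole file
#   rw = mmap_io('out.bin', 'wb', 0x1000)  # creates a zero-filled 4 KiB file
#   rw.flush(); rw.close(); ro.close()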
key = unpack("<4I", bytes.fromhex("d1b5e39e5eea049d671dd5abd2afcbaf"))
# guacamoles_31_O.09_190820
mbox5 = [0x60, 0x8a, 0x3f, 0x2d, 0x68, 0x6b, 0xd4, 0x23, 0x51, 0x0c,
0xd0, 0x95, 0xbb, 0x40, 0xe9, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0a, 0x00]
# instantnoodlev_15_O.07_201103
mbox6 = [0xAA, 0x69, 0x82, 0x9E, 0x5D, 0xDE, 0xB1, 0x3D, 0x30, 0xBB,
0x81, 0xA3, 0x46, 0x65, 0xa3, 0xe1, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0a, 0x00]
# guacamolet_21_O.08_190502
mbox4 = [0xC4, 0x5D, 0x05, 0x71, 0x99, 0xDD, 0xBB, 0xEE, 0x29, 0xA1,
0x6D, 0xC7, 0xAD, 0xBF, 0xA4, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0a, 0x00]
sbox = bytes.fromhex("c66363a5c66363a5f87c7c84f87c7c84ee777799ee777799f67b7b8df67b7b8d" +
"fff2f20dfff2f20dd66b6bbdd66b6bbdde6f6fb1de6f6fb191c5c55491c5c554" +
"60303050603030500201010302010103ce6767a9ce6767a9562b2b7d562b2b7d" +
"e7fefe19e7fefe19b5d7d762b5d7d7624dababe64dababe6ec76769aec76769a" +
"8fcaca458fcaca451f82829d1f82829d89c9c94089c9c940fa7d7d87fa7d7d87" +
"effafa15effafa15b25959ebb25959eb8e4747c98e4747c9fbf0f00bfbf0f00b" +
"41adadec41adadecb3d4d467b3d4d4675fa2a2fd5fa2a2fd45afafea45afafea" +
"239c9cbf239c9cbf53a4a4f753a4a4f7e4727296e47272969bc0c05b9bc0c05b" +
"75b7b7c275b7b7c2e1fdfd1ce1fdfd1c3d9393ae3d9393ae4c26266a4c26266a" +
"6c36365a6c36365a7e3f3f417e3f3f41f5f7f702f5f7f70283cccc4f83cccc4f" +
"6834345c6834345c51a5a5f451a5a5f4d1e5e534d1e5e534f9f1f108f9f1f108" +
"e2717193e2717193abd8d873abd8d87362313153623131532a15153f2a15153f" +
"0804040c0804040c95c7c75295c7c75246232365462323659dc3c35e9dc3c35e" +
"3018182830181828379696a1379696a10a05050f0a05050f2f9a9ab52f9a9ab5" +
"0e0707090e07070924121236241212361b80809b1b80809bdfe2e23ddfe2e23d" +
"cdebeb26cdebeb264e2727694e2727697fb2b2cd7fb2b2cdea75759fea75759f" +
"1209091b1209091b1d83839e1d83839e582c2c74582c2c74341a1a2e341a1a2e" +
"361b1b2d361b1b2ddc6e6eb2dc6e6eb2b45a5aeeb45a5aee5ba0a0fb5ba0a0fb" +
"a45252f6a45252f6763b3b4d763b3b4db7d6d661b7d6d6617db3b3ce7db3b3ce" +
"5229297b5229297bdde3e33edde3e33e5e2f2f715e2f2f711384849713848497" +
"a65353f5a65353f5b9d1d168b9d1d1680000000000000000c1eded2cc1eded2c" +
"4020206040202060e3fcfc1fe3fcfc1f79b1b1c879b1b1c8b65b5bedb65b5bed" +
"d46a6abed46a6abe8dcbcb468dcbcb4667bebed967bebed97239394b7239394b" +
"944a4ade944a4ade984c4cd4984c4cd4b05858e8b05858e885cfcf4a85cfcf4a" +
"bbd0d06bbbd0d06bc5efef2ac5efef2a4faaaae54faaaae5edfbfb16edfbfb16" +
"864343c5864343c59a4d4dd79a4d4dd766333355663333551185859411858594" +
"8a4545cf8a4545cfe9f9f910e9f9f9100402020604020206fe7f7f81fe7f7f81" +
"a05050f0a05050f0783c3c44783c3c44259f9fba259f9fba4ba8a8e34ba8a8e3" +
"a25151f3a25151f35da3a3fe5da3a3fe804040c0804040c0058f8f8a058f8f8a" +
"3f9292ad3f9292ad219d9dbc219d9dbc7038384870383848f1f5f504f1f5f504" +
"63bcbcdf63bcbcdf77b6b6c177b6b6c1afdada75afdada754221216342212163" +
"2010103020101030e5ffff1ae5ffff1afdf3f30efdf3f30ebfd2d26dbfd2d26d" +
"81cdcd4c81cdcd4c180c0c14180c0c142613133526131335c3ecec2fc3ecec2f" +
"be5f5fe1be5f5fe1359797a2359797a2884444cc884444cc2e1717392e171739" +
"93c4c45793c4c45755a7a7f255a7a7f2fc7e7e82fc7e7e827a3d3d477a3d3d47" +
"c86464acc86464acba5d5de7ba5d5de73219192b3219192be6737395e6737395" +
"c06060a0c06060a019818198198181989e4f4fd19e4f4fd1a3dcdc7fa3dcdc7f" +
"4422226644222266542a2a7e542a2a7e3b9090ab3b9090ab0b8888830b888883" +
"8c4646ca8c4646cac7eeee29c7eeee296bb8b8d36bb8b8d32814143c2814143c" +
"a7dede79a7dede79bc5e5ee2bc5e5ee2160b0b1d160b0b1daddbdb76addbdb76" +
"dbe0e03bdbe0e03b6432325664323256743a3a4e743a3a4e140a0a1e140a0a1e" +
"924949db924949db0c06060a0c06060a4824246c4824246cb85c5ce4b85c5ce4" +
"9fc2c25d9fc2c25dbdd3d36ebdd3d36e43acacef43acacefc46262a6c46262a6" +
"399191a8399191a8319595a4319595a4d3e4e437d3e4e437f279798bf279798b" +
"d5e7e732d5e7e7328bc8c8438bc8c8436e3737596e373759da6d6db7da6d6db7" +
"018d8d8c018d8d8cb1d5d564b1d5d5649c4e4ed29c4e4ed249a9a9e049a9a9e0" +
"d86c6cb4d86c6cb4ac5656faac5656faf3f4f407f3f4f407cfeaea25cfeaea25" +
"ca6565afca6565aff47a7a8ef47a7a8e47aeaee947aeaee91008081810080818" +
"6fbabad56fbabad5f0787888f07878884a25256f4a25256f5c2e2e725c2e2e72" +
"381c1c24381c1c2457a6a6f157a6a6f173b4b4c773b4b4c797c6c65197c6c651" +
"cbe8e823cbe8e823a1dddd7ca1dddd7ce874749ce874749c3e1f1f213e1f1f21" +
"964b4bdd964b4bdd61bdbddc61bdbddc0d8b8b860d8b8b860f8a8a850f8a8a85" +
"e0707090e07070907c3e3e427c3e3e4271b5b5c471b5b5c4cc6666aacc6666aa" +
"904848d8904848d80603030506030305f7f6f601f7f6f6011c0e0e121c0e0e12" +
"c26161a3c26161a36a35355f6a35355fae5757f9ae5757f969b9b9d069b9b9d0" +
"178686911786869199c1c15899c1c1583a1d1d273a1d1d27279e9eb9279e9eb9" +
"d9e1e138d9e1e138ebf8f813ebf8f8132b9898b32b9898b32211113322111133" +
"d26969bbd26969bba9d9d970a9d9d970078e8e89078e8e89339494a7339494a7" +
"2d9b9bb62d9b9bb63c1e1e223c1e1e221587879215878792c9e9e920c9e9e920" +
"87cece4987cece49aa5555ffaa5555ff5028287850282878a5dfdf7aa5dfdf7a" +
"038c8c8f038c8c8f59a1a1f859a1a1f809898980098989801a0d0d171a0d0d17" +
"65bfbfda65bfbfdad7e6e631d7e6e631844242c6844242c6d06868b8d06868b8" +
"824141c3824141c3299999b0299999b05a2d2d775a2d2d771e0f0f111e0f0f11" +
"7bb0b0cb7bb0b0cba85454fca85454fc6dbbbbd66dbbbbd62c16163a2c16163a")
class QCSparse:
def __init__(self, filename):
self.rf = mmap_io(filename, "rb")
self.data = Queue()
self.offset = 0
self.tmpdata = bytearray()
self.major_version = None
self.minor_version = None
self.file_hdr_sz = None
self.chunk_hdr_sz = None
self.blk_sz = None
self.total_blks = None
self.total_chunks = None
self.image_checksum = None
self.info = print
self.debug = print
self.error = print
self.warning = print
def readheader(self, offset):
self.rf.seek(offset)
header = unpack("<I4H4I", self.rf.read(0x1C))
magic = header[0]
self.major_version = header[1]
self.minor_version = header[2]
self.file_hdr_sz = header[3]
self.chunk_hdr_sz = header[4]
self.blk_sz = header[5]
self.total_blks = header[6]
self.total_chunks = header[7]
self.image_checksum = header[8]
if magic != 0xED26FF3A:
return False
if self.file_hdr_sz != 28:
self.error("The file header size was expected to be 28, but is %u." % self.file_hdr_sz)
return False
if self.chunk_hdr_sz != 12:
self.error("The chunk header size was expected to be 12, but is %u." % self.chunk_hdr_sz)
return False
self.info("Sparse Format detected. Using unpacked image.")
return True
def get_chunk_size(self):
if self.total_blks < self.offset:
self.error(
"The header said we should have %u output blocks, but we saw %u" % (self.total_blks, self.offset))
return -1
header = unpack("<2H2I", self.rf.read(self.chunk_hdr_sz))
chunk_type = header[0]
chunk_sz = header[2]
total_sz = header[3]
data_sz = total_sz - 12
if chunk_type == 0xCAC1:
if data_sz != (chunk_sz * self.blk_sz):
self.error(
"Raw chunk input size (%u) does not match output size (%u)" % (data_sz, chunk_sz * self.blk_sz))
return -1
else:
self.rf.seek(self.rf.tell() + chunk_sz * self.blk_sz)
return chunk_sz * self.blk_sz
elif chunk_type == 0xCAC2:
if data_sz != 4:
self.error("Fill chunk should have 4 bytes of fill, but this has %u" % data_sz)
return -1
else:
return chunk_sz * self.blk_sz // 4
elif chunk_type == 0xCAC3:
return chunk_sz * self.blk_sz
elif chunk_type == 0xCAC4:
if data_sz != 4:
self.error("CRC32 chunk should have 4 bytes of CRC, but this has %u" % data_sz)
return -1
else:
self.rf.seek(self.rf.tell() + 4)
return 0
else:
self.debug("Unknown chunk type 0x%04X" % chunk_type)
return -1
def unsparse(self):
if self.total_blks < self.offset:
self.error(
"The header said we should have %u output blocks, but we saw %u" % (self.total_blks, self.offset))
return -1
header = unpack("<2H2I", self.rf.read(self.chunk_hdr_sz))
chunk_type = header[0]
chunk_sz = header[2]
total_sz = header[3]
data_sz = total_sz - 12
if chunk_type == 0xCAC1:
if data_sz != (chunk_sz * self.blk_sz):
self.error(
"Raw chunk input size (%u) does not match output size (%u)" % (data_sz, chunk_sz * self.blk_sz))
return -1
else:
# self.debug("Raw data")
data = self.rf.read(chunk_sz * self.blk_sz)
self.offset += chunk_sz
return data
elif chunk_type == 0xCAC2:
if data_sz != 4:
self.error("Fill chunk should have 4 bytes of fill, but this has %u" % data_sz)
return -1
else:
fill_bin = self.rf.read(4)
fill = unpack("<I", fill_bin)
# self.debug(format("Fill with 0x%08X" % fill))
data = fill_bin * (chunk_sz * self.blk_sz // 4)
self.offset += chunk_sz
return data
elif chunk_type == 0xCAC3:
data = b'\x00' * chunk_sz * self.blk_sz
self.offset += chunk_sz
return data
elif chunk_type == 0xCAC4:
if data_sz != 4:
self.error("CRC32 chunk should have 4 bytes of CRC, but this has %u" % data_sz)
return -1
else:
crc_bin = self.rf.read(4)
crc = unpack("<I", crc_bin)
# self.debug(format("Unverified CRC32 0x%08X" % crc))
return b""
else:
# self.debug("Unknown chunk type 0x%04X" % chunk_type)
return -1
def getsize(self):
self.rf.seek(0x1C)
length = 0
chunk = 0
while chunk < self.total_chunks:
tlen = self.get_chunk_size()
if tlen == -1:
break
length += tlen
chunk += 1
self.rf.seek(0x1C)
return length
def read(self, length=None):
if length is None:
return self.unsparse()
if length <= len(self.tmpdata):
tdata = self.tmpdata[:length]
self.tmpdata = self.tmpdata[length:]
return tdata
while len(self.tmpdata) < length:
self.tmpdata.extend(self.unsparse())
if length <= len(self.tmpdata):
tdata = self.tmpdata[:length]
self.tmpdata = self.tmpdata[length:]
return tdata
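# Hedged usage sketch for QCSparse ('super.img' is a hypothetical path):
#
#   qs = QCSparse('super.img')
#   if qs.readheader(0):        # sparse magic found at offset 0
#       total = qs.getsize()    # logical size after unsparsing
#       first = qs.read(4096)   # de-sparses transparently while reading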
def gsbox(offset):
return int.from_bytes(sbox[offset:offset + 4], 'little')
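# Hedged note: gsbox() takes a byte offset into sbox (not an entry index) and
# returns a little-endian u32, e.g. gsbox(0) reads c6 63 63 a5 -> 0xA56363C6.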
def key_update(iv1, asbox):
d = iv1[0] ^ asbox[0] # 9EE3B5B1
a = iv1[1] ^ asbox[1]
b = iv1[2] ^ asbox[2] # ABD51D58
c = iv1[3] ^ asbox[3] # AFCBAFFF
e = gsbox(((b >> 0x10) & 0xff) * 8 + 2) ^ gsbox(((a >> 8) & 0xff) * 8 + 3) ^ gsbox((c >> 0x18) * 8 + 1) ^ \
gsbox((d & 0xff) * 8) ^ asbox[4] # 35C2A10B
h = gsbox(((c >> 0x10) & 0xff) * 8 + 2) ^ gsbox(((b >> 8) & 0xff) * 8 + 3) ^ gsbox((d >> 0x18) * 8 + 1) ^ \
gsbox((a & 0xff) * 8) ^ asbox[5] # 75CF3118
i = gsbox(((d >> 0x10) & 0xff) * 8 + 2) ^ gsbox(((c >> 8) & 0xff) * 8 + 3) ^ gsbox((a >> 0x18) * 8 + 1) ^ \
gsbox((b & 0xff) * 8) ^ asbox[6] # 6AD3F5C4
a = gsbox(((d >> 8) & 0xff) * 8 + 3) ^ gsbox(((a >> 0x10) & 0xff) * 8 + 2) ^ gsbox((b >> 0x18) * 8 + 1) ^ \
gsbox((c & 0xff) * 8) ^ asbox[7] # D99AC8FB
g = 8
for f in range(asbox[0x3c] - 2):
d = e >> 0x18 # 35
m = h >> 0x10 # cf
s = h >> 0x18
z = e >> 0x10
l = i >> 0x18
t = e >> 8
e = gsbox(((i >> 0x10) & 0xff) * 8 + 2) ^ gsbox(((h >> 8) & 0xff) * 8 + 3) ^ \
gsbox((a >> 0x18) * 8 + 1) ^ gsbox((e & 0xff) * 8) ^ asbox[g] # B67F2106, 82508918
h = gsbox(((a >> 0x10) & 0xff) * 8 + 2) ^ gsbox(((i >> 8) & 0xff) * 8 + 3) ^ \
gsbox(d * 8 + 1) ^ gsbox((h & 0xff) * 8) ^ asbox[g + 1] # 85813F52
i = gsbox((z & 0xff) * 8 + 2) ^ gsbox(((a >> 8) & 0xff) * 8 + 3) ^ \
gsbox(s * 8 + 1) ^ gsbox((i & 0xff) * 8) ^ asbox[g + 2] # C8022573
a = gsbox((t & 0xff) * 8 + 3) ^ gsbox((m & 0xff) * 8 + 2) ^ \
gsbox(l * 8 + 1) ^ gsbox((a & 0xff) * 8) ^ asbox[g + 3] # AD34EC55
g = g + 4
# a=6DB8AA0E
# b=ABD51D58
# c=AFCBAFFF
# d=51
# e=AC402324
# h=B2D24440
# i=CC2ADF24
# t=510805
return [(gsbox(((i >> 0x10) & 0xff) * 8) & 0xff0000) ^ (gsbox(((h >> 8) & 0xff) * 8 + 1) & 0xff00) ^
            (gsbox((a >> 0x18) * 8 + 3) & 0xff000000) ^ (gsbox((e & 0xff) * 8 + 2) & 0xFF) ^ asbox[g],
(gsbox(((a >> 0x10) & 0xff) * 8) & 0xff0000) ^ (gsbox(((i >> 8) & 0xff) * 8 + 1) & 0xff00) ^
(gsbox((e >> 0x18) * 8 + 3) & 0xff000000) ^ (gsbox((h & 0xff) * 8 + 2) & 0xFF) ^ asbox[g + 3],
(gsbox(((e >> 0x10) & 0xff) * 8) & 0xff0000) ^ (gsbox(((a >> 8) & 0xff) * 8 + 1) & 0xff00) ^
(gsbox((h >> 0x18) * 8 + 3) & 0xff000000) ^ (gsbox((i & 0xff) * 8 + 2) & 0xFF) ^ asbox[g + 2],
(gsbox(((h >> 0x10) & 0xff) * 8) & 0xff0000) ^ (gsbox(((e >> 8) & 0xff) * 8 + 1) & 0xff00) ^
(gsbox((i >> 0x18) * 8 + 3) & 0xff000000) ^ (gsbox((a & 0xff) * 8 + 2) & 0xFF) ^ asbox[g + 1]]
def key_custom(inp, rkey, outlength=0, encrypt=False):
outp = bytearray()
inp = bytearray(inp)
pos = outlength
outp_extend = outp.extend
ptr = 0
length = len(inp)
if outlength != 0:
while pos < len(rkey):
if length == 0:
break
buffer = inp[pos]
outp_extend(rkey[pos] ^ buffer)
rkey[pos] = buffer
length -= 1
pos += 1
if length > 0xF:
for ptr in range(0, length, 0x10):
rkey = key_update(rkey, mbox)
if pos < 0x10:
slen = ((0xf - pos) >> 2) + 1
tmp = [rkey[i] ^ int.from_bytes(inp[pos + (i * 4) + ptr:pos + (i * 4) + ptr + 4], "little") for i in
range(0, slen)]
outp.extend(b"".join(tmp[i].to_bytes(4, 'little') for i in range(0, slen)))
if encrypt:
rkey = tmp
else:
rkey = [int.from_bytes(inp[pos + (i * 4) + ptr:pos + (i * 4) + ptr + 4], "little") for i in
range(0, slen)]
length = length - 0x10
if length != 0:
rkey = key_update(rkey, sbox)
j = pos
m = 0
while length > 0:
data = inp[j + ptr:j + ptr + 4]
if len(data) < 4:
data += b"\x00" * (4 - len(data))
tmp = int.from_bytes(data, 'little')
outp_extend((tmp ^ rkey[m]).to_bytes(4, 'little'))
if encrypt:
rkey[m] = tmp ^ rkey[m]
else:
rkey[m] = tmp
length -= 4
j += 4
m += 1
return outp
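# A minimal round-trip sketch for key_custom: with the same initial round key,
# encrypt=True followed by encrypt=False recovers the input, because the key
# stream is chained from the ciphertext words (CFB-style). The 4-word key
# below is a made-up placeholder; real round keys and the sbox table (and
# mbox, for inputs of 16 bytes or more) are defined elsewhere in this file.
def _demo_key_custom_roundtrip(data=b"example"):
    rkey = [0x11111111, 0x22222222, 0x33333333, 0x44444444]  # placeholder key
    enc = key_custom(data, list(rkey), 0, encrypt=True)
    dec = key_custom(bytes(enc), list(rkey), 0, encrypt=False)
    assert bytes(dec[:len(data)]) == data
    return enc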
def extractxml(filename, key):
with mmap_io(filename, 'rb') as rf:
sfilename = os.path.join(filename[:-len(os.path.basename(filename))], "extract", "settings.xml")
filesize = os.stat(filename).st_size
rf.seek(filesize - 0x200)
hdr = rf.read(0x200)
xmllength = int.from_bytes(hdr[0x18:0x18 + 4], 'little')
xmlpad = 0x200 - (xmllength % 0x200)
rf.seek(filesize - 0x200 - (xmllength + xmlpad))
inp = rf.read(xmllength + xmlpad)
outp = key_custom(inp, key, 0)
if b"xml " not in outp:
return None
with mmap_io(sfilename, 'wb', xmllength) as wf:
wf.write(outp[:xmllength])
return outp[:xmllength].decode('utf-8')
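# Layout note: an OPS image ends with a 0x200-byte footer; the little-endian
# dword at footer offset 0x18 gives the length of the encrypted settings.xml,
# which is stored, padded to a 0x200 boundary, directly before the footer.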
def decryptfile(rkey, filename, path, wfilename, start, length):
sha256 = hashlib.sha256()
print(f"Extracting {wfilename}")
with mmap_io(filename, 'rb') as rf:
rf.seek(start)
data = rf.read(length)
if length % 4:
data += (4 - (length % 4)) * b'\x00'
outp = key_custom(data, rkey, 0)
sha256.update(outp[:length])
with mmap_io(os.path.join(path, wfilename), 'wb', length) as wf:
wf.write(outp[:length])
if length % 0x1000 > 0:
sha256.update(b"\x00" * (0x1000 - (length % 0x1000)))
return sha256.hexdigest()
def encryptsubsub(rkey, data, wf):
length = len(data)
if length % 4:
data += (4 - (length % 4)) * b'\x00'
outp = key_custom(data, rkey, 0, True)
wf.write(outp[:length])
return length
def encryptsub(rkey, rf, wf):
data = rf.read()
return encryptsubsub(rkey, data, wf)
def encryptfile(key, filename, wfilename):
print(f"Encrypting {filename}")
with mmap_io(filename, 'rb') as rf:
filesize = os.stat(filename).st_size
with mmap_io(wfilename, 'wb', filesize) as wf:
return encryptsub(key, rf, wf)
def calc_digest(filename):
with mmap_io(filename, 'rb') as rf:
data = rf.read()
sha256 = hashlib.sha256()
sha256.update(data)
if len(data) % 0x1000 > 0:
sha256.update(b"\x00" * (0x1000 - (len(data) % 0x1000)))
return sha256.hexdigest()
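# Note: both decryptfile() and calc_digest() hash the data zero-padded to a
# 4 KiB (0x1000) boundary, presumably to match how the stock packer digests
# partition images page by page.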
def copysub(rf, wf, start, length):
rf.seek(start)
rlen = 0
while length > 0:
if length < 0x100000:
size = length
else:
size = 0x100000
data = rf.read(size)
wf.write(data)
rlen += len(data)
length -= size
return rlen
def copyfile(filename, path, wfilename, start, length):
print(f"Extracting {wfilename}")
with mmap_io(filename, 'rb') as rf:
with mmap_io(os.path.join(path, wfilename), 'wb', length) as wf:
return copysub(rf, wf, start, length)
def encryptitem(key, item, directory, pos, wf):
    try:
        filename = item.attrib["Path"]
    except KeyError:
        filename = item.attrib["filename"]
if filename == "":
return item, pos
filename = os.path.join(directory, filename)
start = pos // 0x200
item.attrib["FileOffsetInSrc"] = str(start)
size = os.stat(filename).st_size
item.attrib["SizeInByteInSrc"] = str(size)
sectors = size // 0x200
if (size % 0x200) != 0:
sectors += 1
item.attrib["SizeInSectorInSrc"] = str(sectors)
with mmap_io(filename, 'rb') as rf:
rlen = encryptsub(key, rf, wf)
pos += rlen
if (rlen % 0x200) != 0:
sublen = 0x200 - (rlen % 0x200)
wf.write(b'\x00' * sublen)
pos += sublen
return item, pos
def copyitem(item, directory, pos, wf):
    try:
        filename = item.attrib["Path"]
    except KeyError:
        filename = item.attrib["filename"]
if filename == "":
return item, pos
filename = os.path.join(directory, filename)
start = pos // 0x200
item.attrib["FileOffsetInSrc"] = str(start)
size = os.stat(filename).st_size
item.attrib["SizeInByteInSrc"] = str(size)
sectors = size // 0x200
if (size % 0x200) != 0:
sectors += 1
item.attrib["SizeInSectorInSrc"] = str(sectors)
with mmap_io(filename, 'rb') as rf:
rlen = copysub(rf, wf, 0, size)
pos += rlen
if (rlen % 0x200) != 0:
sublen = 0x200 - (rlen % 0x200)
wf.write(b'\x00' * sublen)
pos += sublen
return item, pos
def main():
global mbox
print("Oneplus CryptTools V1.4 (c) B. Kerler 2019-2021\n----------------------------\n")
if args["decrypt"]:
filename = args["<filename>"].replace("\\", "/")
print(f"Extracting {filename}")
if "/" in filename:
path = filename[:filename.rfind("/")]
else:
path = ""
path = os.path.join(path, "extract")
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
else:
os.mkdir(path)
mbox = mbox5
xml = extractxml(filename, key)
if xml is not None:
print("MBox5")
else:
mbox = mbox6
xml = extractxml(filename, key)
if xml is not None:
print("MBox6")
else:
mbox = mbox4
xml = extractxml(filename, key)
if xml is not None:
print("MBox4")
else:
print("Unsupported key !")
exit(0)
root = ET.fromstring(xml)
for child in root:
if child.tag == "SAHARA":
for item in child:
if item.tag == "File":
wfilename = item.attrib["Path"]
start = int(item.attrib["FileOffsetInSrc"]) * 0x200
slength = int(item.attrib["SizeInSectorInSrc"]) * 0x200
length = int(item.attrib["SizeInByteInSrc"])
decryptfile(key, filename, path, wfilename, start, length)
elif child.tag == "UFS_PROVISION":
for item in child:
if item.tag == "File":
wfilename = item.attrib["Path"]
start = int(item.attrib["FileOffsetInSrc"]) * 0x200
# length = int(item.attrib["SizeInSectorInSrc"]) * 0x200
length = int(item.attrib["SizeInByteInSrc"])
copyfile(filename, path, wfilename, start, length)
elif "Program" in child.tag:
# if not os.path.exists(os.path.join(path, child.tag)):
# os.mkdir(os.path.join(path, child.tag))
# spath = os.path.join(path, child.tag)
for item in child:
if "filename" in item.attrib:
sparse = item.attrib["sparse"] == "true"
wfilename = item.attrib["filename"]
if wfilename == "":
continue
start = int(item.attrib["FileOffsetInSrc"]) * 0x200
slength = int(item.attrib["SizeInSectorInSrc"]) * 0x200
length = int(item.attrib["SizeInByteInSrc"])
sha256 = item.attrib["Sha256"]
copyfile(filename, path, wfilename, start, length)
csha256 = calc_digest(os.path.join(path, wfilename))
if sha256 != csha256 and not sparse:
print("Sha256 fail.")
else:
for subitem in item:
if "filename" in subitem.attrib:
wfilename = subitem.attrib["filename"]
sparse = subitem.attrib["sparse"] == "true"
if wfilename == "":
continue
start = int(subitem.attrib["FileOffsetInSrc"]) * 0x200
slength = int(subitem.attrib["SizeInSectorInSrc"]) * 0x200
length = int(subitem.attrib["SizeInByteInSrc"])
sha256 = subitem.attrib["Sha256"]
copyfile(filename, path, wfilename, start, length)
csha256 = calc_digest(os.path.join(path, wfilename))
if sha256 != csha256 and not sparse:
print("Sha256 fail.")
# else:
# print (child.tag, child.attrib)
print("Done. Extracted files to " + path)
exit(0)
elif args["encrypt"]:
if args["--mbox"] == "4":
mbox = mbox4
elif args["--mbox"] == "5":
mbox = mbox5
elif args["--mbox"] == "6":
mbox = mbox6
directory = args["<directory>"].replace("\\", "/")
settings = os.path.join(directory, "settings.xml")
# root = ET.fromstring(settings)
tree = ET.parse(settings)
root = tree.getroot()
outfilename = os.path.join(Path(directory).parent, args["--savename"])
projid = None
firmware = None
if os.path.exists(outfilename):
os.remove(outfilename)
with open(outfilename, 'wb') as wf:
pos = 0
for child in root:
if child.tag == "BasicInfo":
if "Project" in child.attrib:
projid = child.attrib["Project"]
if "Version" in child.attrib:
firmware = child.attrib["Version"]
if child.tag == "SAHARA":
for item in child:
if item.tag == "File":
item, pos = encryptitem(key, item, directory, pos, wf)
elif child.tag == "UFS_PROVISION":
for item in child:
if item.tag == "File":
item, pos = copyitem(item, directory, pos, wf)
elif "Program" in child.tag:
for item in child:
if "filename" in item.attrib:
item, pos = copyitem(item, directory, pos, wf)
else:
for subitem in item:
subitem, pos = copyitem(subitem, directory, pos, wf)
try:
configpos = pos // 0x200
with open(settings, 'rb') as rf:
data = rf.read()
rlength = len(data)
data += (0x10 - (rlength % 0x10)) * b"\x00"
rlen = encryptsubsub(key, data, wf)
if ((rlen + pos) % 0x200) != 0:
sublen = 0x200 - ((rlen + pos) % 0x200)
wf.write(b'\x00' * sublen)
pos += sublen
if args["--projid"] is None:
if projid is None:
projid = "18801"
else:
projid = args["--projid"]
if args["--firmwarename"] is None:
if firmware is None:
firmware = "fajita_41_J.42_191214"
else:
firmware = args["--firmwarename"]
magic = 0x7CEF
hdr = b""
hdr += pack("<I", 2)
hdr += pack("<I", 1)
hdr += pack("<I", 0)
hdr += pack("<I", 0)
hdr += pack("<I", magic)
hdr += pack("<I", configpos)
hdr += pack("<I", rlength)
hdr += bytes(projid, 'utf-8')
hdr += b"\x00" * (0x10 - len(projid))
hdr += bytes(firmware, 'utf-8')
hdr += b"\x00" * (0x200 - len(hdr))
wf.write(hdr)
with open(outfilename, 'rb') as rt:
with open("md5sum_pack.md5", 'wb') as wt:
mt = hashlib.md5()
mt.update(rt.read())
wt.write(bytes(mt.hexdigest(), 'utf-8') + b" " + bytes(os.path.basename(outfilename), 'utf-8'))
print("Done. Created " + outfilename)
except Exception as e:
print(e)
exit(0)
elif args["encryptfile"]:
filename = args["<filename>"].replace("\\", "/")
mbox = mbox5
encryptfile(key, filename, filename + ".enc")
print("Done.")
elif args["decryptfile"]:
filename = args["<filename>"].replace("\\", "/")
mbox = mbox5
fsize = os.stat(filename).st_size
decryptfile(key, filename, "", filename + ".dec", 0, fsize)
print("Done.")
else:
print("Usage:./opsdecrypt.py decrypt [filename.ops]")
exit(0)
if __name__ == "__main__":
main()

318
ozipdecrypt.py Normal file
View File

@ -0,0 +1,318 @@
#!/usr/bin/env python3
# (c) B. Kerler 2017-2020, licensed under MIT license
"""
Usage:
ozipdecrypt.py --help
ozipdecrypt.py <filename>
Options:
Mode 1 for regular ozip, Mode 2 for CPH1803/CPH1909 [default: 1]
"""
import os
import stat
import shutil
import binascii
from Crypto.Cipher import AES
import zipfile
from os.path import basename
def main(file_arg):
keys = [
"D6EECF0AE5ACD4E0E9FE522DE7CE381E", # mnkey
"D6ECCF0AE5ACD4E0E92E522DE7C1381E", # mkey
"D6DCCF0AD5ACD4E0292E522DB7C1381E",
# realkey, R9s CPH1607 MSM8953, Plus, R11, RMX1921 Realme XT, RMX1851EX Realme Android 10, RMX1992EX_11_OTA_1050
"D7DCCE1AD4AFDCE2393E5161CBDC4321", # testkey
"D7DBCE2AD4ADDCE1393E5521CBDC4321", # utilkey
"D7DBCE1AD4AFDCE1393E5121CBDC4321", # R11s CPH1719 MSM8976, Plus
"D4D2CD61D4AFDCE13B5E01221BD14D20", # FindX CPH1871 SDM845
"261CC7131D7C1481294E532DB752381E", # FindX
"1CA21E12271335AE33AB81B2A7B14622", # Realme 2 pro SDM660/MSM8976
"D4D2CE11D4AFDCE13B3E0121CBD14D20", # K1 SDM660/MSM8976
"1C4C1EA3A12531AE491B21BB31613C11", # Realme 3 Pro SDM710, X, 5 Pro, Q, RMX1921 Realme XT
"1C4C1EA3A12531AE4A1B21BB31C13C21", # Reno 10x zoom PCCM00 SDM855, CPH1921EX Reno 5G
"1C4A11A3A12513AE441B23BB31513121", # Reno 2 PCKM00 SDM730G
"1C4A11A3A12589AE441A23BB31517733", # Realme X2 SDM730G
"1C4A11A3A22513AE541B53BB31513121", # Realme 5 SDM665
"2442CE821A4F352E33AE81B22BC1462E", # R17 Pro SDM710
"14C2CD6214CFDC2733AE81B22BC1462C", # CPH1803 OppoA3s SDM450/MSM8953
"1E38C1B72D522E29E0D4ACD50ACFDCD6",
"12341EAAC4C123CE193556A1BBCC232D",
"2143DCCB21513E39E1DCAFD41ACEDBD7",
"2D23CCBBA1563519CE23C1C4AA1E3412", # A77 CPH1715 MT6750T
"172B3E14E46F3CE13E2B5121CBDC4321", # Realme 1 MTK P60
"ACAA1E12A71431CE4A1B21BBA1C1C6A2", # Realme U1 RMX1831 MTK P70
"ACAC1E13A72531AE4A1B22BB31C1CC22", # Realme 3 RMX1825EX P70
"1C4411A3A12533AE441B21BB31613C11", # A1k CPH1923 MTK P22
"1C4416A8A42717AE441523B336513121", # Reno 3 PCRM00 MTK 1000L, CPH2059 OPPO A92, CPH2067 OPPO A72
"55EEAA33112133AE441B23BB31513121", # RenoAce SDM855Plus
"ACAC1E13A12531AE4A1B21BB31C13C21", # Reno, K3
"ACAC1E13A72431AE4A1B22BBA1C1C6A2", # A9
"12CAC11211AAC3AEA2658690122C1E81", # A1,A83t
"1CA21E12271435AE331B81BBA7C14612", # CPH1909 OppoA5s MT6765
"D1DACF24351CE428A9CE32ED87323216", # Realme1(reserved)
"A1CC75115CAECB890E4A563CA1AC67C8", # A73(reserved)
"2132321EA2CA86621A11241ABA512722", # Realme3(reserved)
"22A21E821743E5EE33AE81B227B1462E"
# F3 Plus CPH1613 - MSM8976
]
    def keytest(data):
        for key in keys:
            ctx = AES.new(binascii.unhexlify(key), AES.MODE_ECB)
            dat = ctx.decrypt(data)
            if dat[0:4] in (b'\x50\x4B\x03\x04', b'\x41\x56\x42\x30', b'\x41\x4E\x44\x52'):
                print("Found correct AES key: " + key)
                return binascii.unhexlify(key)
        return -1
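    # Detection idea used by keytest() above: try each candidate key with
    # AES-ECB on one 16-byte ciphertext block and accept it as soon as the
    # plaintext starts with a known magic (PK\x03\x04 = zip archive,
    # AVB0 = vbmeta, ANDR = Android boot image).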
def del_rw(action, name, exc):
os.chmod(name, stat.S_IWRITE)
os.remove(name)
def rmrf(path):
if os.path.exists(path):
if os.path.isfile(path):
del_rw("", path, "")
else:
shutil.rmtree(path, onerror=del_rw)
def decryptfile(key, rfilename):
with open(rfilename, 'rb') as rr:
with open(rfilename + ".tmp", 'wb') as wf:
rr.seek(0x10)
dsize = int(rr.read(0x10).replace(b"\x00", b"").decode('utf-8'), 10)
rr.seek(0x1050)
print("Decrypting " + rfilename)
flen = os.stat(rfilename).st_size - 0x1050
ctx = AES.new(key, AES.MODE_ECB)
while (dsize > 0):
if flen > 0x4000:
size = 0x4000
else:
size = flen
data = rr.read(size)
if dsize < size:
size = dsize
if len(data) == 0:
break
dr = ctx.decrypt(data)
wf.write(dr[:size])
flen -= size
dsize -= size
os.remove(rfilename)
os.rename(rfilename + ".tmp", rfilename)
def decryptfile2(key, rfilename, wfilename):
with open(rfilename, 'rb') as rr:
with open(wfilename, 'wb') as wf:
ctx = AES.new(key, AES.MODE_ECB)
bstart = 0
goon = True
while (goon):
rr.seek(bstart)
header = rr.read(12)
if len(header) == 0:
break
if header != b"OPPOENCRYPT!":
return 1
rr.seek(0x10 + bstart)
bdsize = int(rr.read(0x10).replace(b"\x00", b"").decode('utf-8'), 10)
if bdsize < 0x40000:
goon = False
rr.seek(0x50 + bstart)
while (bdsize > 0):
data = rr.read(0x10)
if len(data) == 0:
break
                        size = 0x10
if bdsize < 0x10:
size = bdsize
dr = ctx.decrypt(data)
wf.write(dr[:size])
bdsize -= 0x10
data = rr.read(0x3FF0)
if len(data) == 0:
break
bdsize -= 0x3FF0
wf.write(data)
bstart = bstart + 0x40000 + 0x50
return 0
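    # Block format handled by decryptfile2(): each unit of 0x50 + 0x40000
    # bytes starts with the 12-byte "OPPOENCRYPT!" magic and a decimal
    # payload size at offset 0x10; within the payload at offset 0x50, only
    # the first 16 bytes of every 0x4000-byte page are AES-encrypted.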
def mode2(filename):
temp = os.path.join(os.path.abspath(os.path.dirname(filename)), "temp")
out = os.path.join(os.path.abspath(os.path.dirname(filename)), "out")
with open(filename, 'rb') as fr:
magic = fr.read(12)
if magic[:2] == b"PK":
            with zipfile.ZipFile(filename, 'r') as zipObj:
if os.path.exists(temp):
rmrf(temp)
os.mkdir(temp)
if os.path.exists(out):
rmrf(out)
os.mkdir(out)
print("Finding key... " + file_arg)
for zi in zipObj.infolist():
orgfilename = zi.filename
if "boot.img" in orgfilename:
zi.filename = "out"
zipObj.extract(zi, temp)
zi.filename = orgfilename
with open(os.path.join(temp, "out"), 'rb') as rr:
magic = rr.read(12)
if magic == b"OPPOENCRYPT!":
rr.seek(0x50)
data = rr.read(16)
key = keytest(data)
if key == -1:
print("Unknown AES key, reverse key from recovery first!")
return 1
else:
break
else:
print("Unknown mode2, boot.img wasn't encrypted")
break
print("Extracting... " + file_arg)
outzip = filename[:-4] + 'zip'
if os.path.exists(outzip):
os.remove(outzip)
with zipfile.ZipFile(outzip, 'w', zipfile.ZIP_DEFLATED) as WzipObj:
for zi in zipObj.infolist():
orgfilename = zi.filename
zi.filename = "out"
zipObj.extract(zi, temp)
zi.filename = orgfilename
with open(os.path.join(temp, "out"), 'rb') as rr:
magic = rr.read(12)
if magic == b"OPPOENCRYPT!":
print("Decrypting " + orgfilename)
if decryptfile2(key, os.path.join(temp, "out"),
os.path.join(temp, "out") + ".dec") == 1:
return 1
WzipObj.write(os.path.join(temp, "out") + ".dec", orgfilename)
os.remove(os.path.join(temp, "out"))
os.remove(os.path.join(temp, "out") + ".dec")
else:
WzipObj.write(os.path.join(temp, "out"), orgfilename)
os.remove(os.path.join(temp, "out"))
rmrf(temp)
print("DONE... file decrypted to: " + outzip)
return 0
print("ozipdecrypt 1.31 (c) B.Kerler 2017-2021")
filename = file_arg
with open(filename, 'rb') as fr:
magic = fr.read(12)
if magic == b"OPPOENCRYPT!":
pk = False
elif magic[:2] == b"PK":
pk = True
else:
print("ozip has unknown magic, OPPOENCRYPT! expected!")
return 1
    if not pk:
fr.seek(0x1050)
data = fr.read(16)
key = keytest(data)
if (key == -1):
print("Unknown AES key, reverse key from recovery first!")
return 1
ctx = AES.new(key, AES.MODE_ECB)
filename = file_arg[:-4] + "zip"
with open(filename, 'wb') as wf:
fr.seek(0x1050)
print("Decrypting...")
while True:
data = fr.read(16)
if len(data) == 0:
break
wf.write(ctx.decrypt(data))
data = fr.read(0x4000)
if len(data) == 0:
break
wf.write(data)
print("DONE!!")
else:
testkey = True
filename = os.path.abspath(file_arg)
path = os.path.abspath(os.path.dirname(filename))
outpath = os.path.join(path, "tmp")
if os.path.exists(outpath):
shutil.rmtree(outpath)
os.mkdir(outpath)
with zipfile.ZipFile(filename, 'r') as zo:
clist = []
try:
if zo.extract('oppo_metadata', outpath):
with open(os.path.join(outpath, 'oppo_metadata')) as rt:
for line in rt:
clist.append(line[:-1])
except Exception as e:
print(str(e))
print("Detected mode 2....")
return mode2(filename)
if testkey:
fname = ''
if "firmware-update/vbmeta.img" in clist:
fname = "firmware-update/vbmeta.img"
elif "vbmeta.img" in clist:
fname = 'vbmeta.img'
if fname != '':
if zo.extract(fname, outpath):
with open(os.path.join(outpath, fname.replace("/", os.sep)), "rb") as rt:
rt.seek(0x1050)
data = rt.read(16)
key = keytest(data)
if (key == -1):
print("Unknown AES key, reverse key from recovery first!")
return 1
testkey = False
            if testkey:
print("Unknown image, please report an issue with image name!")
return 1
outzip = filename[:-4] + 'zip'
with zipfile.ZipFile(outzip, 'w', zipfile.ZIP_DEFLATED) as WzipObj:
for info in zo.infolist():
print("Extracting " + info.filename)
orgfilename = info.filename
info.filename = "out"
zo.extract(info, outpath)
info.filename = orgfilename
if len(clist) > 0:
if info.filename in clist:
decryptfile(key, os.path.join(outpath, "out"))
WzipObj.write(os.path.join(outpath, "out"), orgfilename)
else:
with open(os.path.join(outpath, "out"), 'rb') as rr:
magic = rr.read(12)
if magic == b"OPPOENCRYPT!":
decryptfile(key, os.path.join(outpath, "out"))
WzipObj.write(os.path.join(outpath, "out"), orgfilename)
print("DONE... files decrypted to: " + outzip)
return 0
if __name__ == '__main__':
import sys, argparse
parser = argparse.ArgumentParser(description="ozipdecrypt 1.3 (c) B.Kerler 2017-2021", add_help=False)
required = parser.add_argument_group('Required')
required.add_argument("filename", help="Path to ozip file")
optional = parser.add_argument_group('Optional')
optional.add_argument("-h", "--help", action="help", help="Show this help message and exit")
args = parser.parse_args()
sys.exit(main(args.filename))

308
rangelib.py Normal file
View File

@ -0,0 +1,308 @@
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import heapq
import itertools
__all__ = ["RangeSet"]
class RangeSet(object):
"""A RangeSet represents a set of non-overlapping ranges on the
integers (ie, a set of integers, but efficient when the set contains
lots of runs.)"""
def __init__(self, data=None):
self.monotonic = False
if isinstance(data, str):
self._parse_internal(data)
elif data:
assert len(data) % 2 == 0
self.data = tuple(self._remove_pairs(data))
self.monotonic = all(x < y for x, y in zip(self.data, self.data[1:]))
else:
self.data = ()
def __iter__(self):
for i in range(0, len(self.data), 2):
yield self.data[i:i + 2]
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return self.data != other.data
  def __nonzero__(self):
    return bool(self.data)

  __bool__ = __nonzero__  # Python 3 uses __bool__ for truth testing
def __str__(self):
if not self.data:
return "empty"
else:
return self.to_string()
def __repr__(self):
return '<RangeSet("' + self.to_string() + '")>'
@classmethod
def parse(cls, text):
"""Parse a text string consisting of a space-separated list of
blocks and ranges, eg "10-20 30 35-40". Ranges are interpreted to
include both their ends (so the above example represents 18
individual blocks. Returns a RangeSet object.)
If the input has all its blocks in increasing order, then returned
RangeSet will have an extra attribute 'monotonic' that is set to
True. For example the input "10-20 30" is monotonic, but the input
"15-20 30 10-14" is not, even though they represent the same set
of blocks (and the two RangeSets will compare equal with ==).
"""
return cls(text)
def _parse_internal(self, text):
data = []
last = -1
monotonic = True
for p in text.split():
if "-" in p:
s, e = (int(x) for x in p.split("-"))
data.append(s)
data.append(e + 1)
if last <= s <= e:
last = e
else:
monotonic = False
else:
s = int(p)
data.append(s)
data.append(s + 1)
if last <= s:
last = s + 1
else:
monotonic = False
data.sort()
self.data = tuple(self._remove_pairs(data))
self.monotonic = monotonic
@staticmethod
def _remove_pairs(source):
"""Remove consecutive duplicate items to simplify the result.
[1, 2, 2, 5, 5, 10] will become [1, 10]."""
last = None
for i in source:
if i == last:
last = None
else:
if last is not None:
yield last
last = i
if last is not None:
yield last
def to_string(self):
out = []
for i in range(0, len(self.data), 2):
s, e = self.data[i:i + 2]
if e == s + 1:
out.append(str(s))
else:
out.append(str(s) + "-" + str(e - 1))
return " ".join(out)
def to_string_raw(self):
assert self.data
return str(len(self.data)) + "," + ",".join(str(i) for i in self.data)
def union(self, other):
"""Return a new RangeSet representing the union of this RangeSet
with the argument.
>>> RangeSet("10-19 30-34").union(RangeSet("18-29"))
<RangeSet("10-34")>
>>> RangeSet("10-19 30-34").union(RangeSet("22 32"))
<RangeSet("10-19 22 30-34")>
"""
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 0 and d == 1) or (z == 1 and d == -1):
out.append(p)
z += d
return RangeSet(data=out)
def intersect(self, other):
"""Return a new RangeSet representing the intersection of this
RangeSet with the argument.
>>> RangeSet("10-19 30-34").intersect(RangeSet("18-32"))
<RangeSet("18-19 30-32")>
>>> RangeSet("10-19 30-34").intersect(RangeSet("22-28"))
<RangeSet("")>
"""
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 1 and d == 1) or (z == 2 and d == -1):
out.append(p)
z += d
return RangeSet(data=out)
def subtract(self, other):
"""Return a new RangeSet representing subtracting the argument
from this RangeSet.
>>> RangeSet("10-19 30-34").subtract(RangeSet("18-32"))
<RangeSet("10-17 33-34")>
>>> RangeSet("10-19 30-34").subtract(RangeSet("22-28"))
<RangeSet("10-19 30-34")>
"""
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((-1, +1)))):
if (z == 0 and d == 1) or (z == 1 and d == -1):
out.append(p)
z += d
return RangeSet(data=out)
def overlaps(self, other):
"""Returns true if the argument has a nonempty overlap with this
RangeSet.
>>> RangeSet("10-19 30-34").overlaps(RangeSet("18-32"))
True
>>> RangeSet("10-19 30-34").overlaps(RangeSet("22-28"))
False
"""
# This is like intersect, but we can stop as soon as we discover the
# output is going to be nonempty.
z = 0
for _, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 1 and d == 1) or (z == 2 and d == -1):
return True
z += d
return False
def size(self):
"""Returns the total size of the RangeSet (ie, how many integers
are in the set).
>>> RangeSet("10-19 30-34").size()
15
"""
total = 0
for i, p in enumerate(self.data):
if i % 2:
total += p
else:
total -= p
return total
def map_within(self, other):
"""'other' should be a subset of 'self'. Returns a RangeSet
representing what 'other' would get translated to if the integers
of 'self' were translated down to be contiguous starting at zero.
>>> RangeSet("0-9").map_within(RangeSet("3-4"))
<RangeSet("3-4")>
>>> RangeSet("10-19").map_within(RangeSet("13-14"))
<RangeSet("3-4")>
>>> RangeSet("10-19 30-39").map_within(RangeSet("17-19 30-32"))
<RangeSet("7-12")>
>>> RangeSet("10-19 30-39").map_within(RangeSet("12-13 17-19 30-32"))
<RangeSet("2-3 7-12")>
"""
out = []
offset = 0
start = None
for p, d in heapq.merge(zip(self.data, itertools.cycle((-5, +5))),
zip(other.data, itertools.cycle((-1, +1)))):
if d == -5:
start = p
elif d == +5:
offset += p - start
start = None
else:
out.append(offset + p - start)
return RangeSet(data=out)
def extend(self, n):
"""Extend the RangeSet by 'n' blocks.
The lower bound is guaranteed to be non-negative.
>>> RangeSet("0-9").extend(1)
<RangeSet("0-10")>
>>> RangeSet("10-19").extend(15)
<RangeSet("0-34")>
>>> RangeSet("10-19 30-39").extend(4)
<RangeSet("6-23 26-43")>
>>> RangeSet("10-19 30-39").extend(10)
<RangeSet("0-49")>
"""
out = self
for i in range(0, len(self.data), 2):
s, e = self.data[i:i + 2]
s1 = max(0, s - n)
e1 = e + n
out = out.union(RangeSet(str(s1) + "-" + str(e1 - 1)))
return out
def first(self, n):
"""Return the RangeSet that contains at most the first 'n' integers.
>>> RangeSet("0-9").first(1)
<RangeSet("0")>
>>> RangeSet("10-19").first(5)
<RangeSet("10-14")>
>>> RangeSet("10-19").first(15)
<RangeSet("10-19")>
>>> RangeSet("10-19 30-39").first(3)
<RangeSet("10-12")>
>>> RangeSet("10-19 30-39").first(15)
<RangeSet("10-19 30-34")>
>>> RangeSet("10-19 30-39").first(30)
<RangeSet("10-19 30-39")>
>>> RangeSet("0-9").first(0)
<RangeSet("")>
"""
if self.size() <= n:
return self
out = []
for s, e in self:
if e - s >= n:
out += (s, s + n)
break
else:
out += (s, e)
n -= e - s
return RangeSet(data=out)
if __name__ == "__main__":
import doctest
doctest.testmod()

289
sparse_img.py Normal file
View File

@ -0,0 +1,289 @@
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import os
import sys
import struct
from hashlib import sha1
import rangelib
class SparseImage(object):
"""Wraps a sparse image file into an image object.
Wraps a sparse image file (and optional file map and clobbered_blocks) into
an image object suitable for passing to BlockImageDiff. file_map contains
the mapping between files and their blocks. clobbered_blocks contains the set
of blocks that should be always written to the target regardless of the old
contents (i.e. copying instead of patching). clobbered_blocks should be in
the form of a string like "0" or "0 1-5 8".
"""
def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
mode="rb", build_map=True):
self.simg_f = f = open(simg_fn, mode)
header_bin = f.read(28)
header = struct.unpack("<I4H4I", header_bin)
magic = header[0]
major_version = header[1]
minor_version = header[2]
file_hdr_sz = header[3]
chunk_hdr_sz = header[4]
self.blocksize = blk_sz = header[5]
self.total_blocks = total_blks = header[6]
self.total_chunks = total_chunks = header[7]
if magic != 0xED26FF3A:
raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
if major_version != 1 or minor_version != 0:
raise ValueError("I know about version 1.0, but this is version %u.%u" %
(major_version, minor_version))
if file_hdr_sz != 28:
raise ValueError("File header size was expected to be 28, but is %u." %
(file_hdr_sz,))
if chunk_hdr_sz != 12:
raise ValueError("Chunk header size was expected to be 12, but is %u." %
(chunk_hdr_sz,))
print("Total of %u %u-byte output blocks in %u input chunks."
% (total_blks, blk_sz, total_chunks))
if not build_map:
return
pos = 0 # in blocks
care_data = []
self.offset_map = offset_map = []
self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)
for i in range(total_chunks):
header_bin = f.read(12)
header = struct.unpack("<2H2I", header_bin)
chunk_type = header[0]
chunk_sz = header[2]
total_sz = header[3]
data_sz = total_sz - 12
if chunk_type == 0xCAC1:
if data_sz != (chunk_sz * blk_sz):
raise ValueError(
"Raw chunk input size (%u) does not match output size (%u)" %
(data_sz, chunk_sz * blk_sz))
else:
care_data.append(pos)
care_data.append(pos + chunk_sz)
offset_map.append((pos, chunk_sz, f.tell(), None))
pos += chunk_sz
f.seek(data_sz, os.SEEK_CUR)
elif chunk_type == 0xCAC2:
fill_data = f.read(4)
care_data.append(pos)
care_data.append(pos + chunk_sz)
offset_map.append((pos, chunk_sz, None, fill_data))
pos += chunk_sz
elif chunk_type == 0xCAC3:
if data_sz != 0:
raise ValueError("Don't care chunk input size is non-zero (%u)" %
data_sz)
else:
pos += chunk_sz
elif chunk_type == 0xCAC4:
raise ValueError("CRC32 chunks are not supported")
else:
raise ValueError("Unknown chunk type 0x%04X not supported" %
(chunk_type,))
self.care_map = rangelib.RangeSet(care_data)
self.offset_index = [i[0] for i in offset_map]
# Bug: 20881595
# Introduce extended blocks as a workaround for the bug. dm-verity may
# touch blocks that are not in the care_map due to block device
# read-ahead. It will fail if such blocks contain non-zeroes. We zero out
# the extended blocks explicitly to avoid dm-verity failures. 512 blocks
# are the maximum read-ahead we configure for dm-verity block devices.
extended = self.care_map.extend(512)
all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
extended = extended.intersect(all_blocks).subtract(self.care_map)
self.extended = extended
if file_map_fn:
self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
else:
self.file_map = {"__DATA": self.care_map}
def AppendFillChunk(self, data, blocks):
f = self.simg_f
# Append a fill chunk
f.seek(0, os.SEEK_END)
f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))
# Update the sparse header
self.total_blocks += blocks
self.total_chunks += 1
f.seek(16, os.SEEK_SET)
f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))
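  # AppendFillChunk() above emits a standard fill chunk: type 0xCAC2, a
  # reserved half-word, the chunk size in blocks, total_sz = 16 (12-byte
  # header plus the 4-byte fill pattern), then the pattern itself; the block
  # and chunk counts in the image header at offset 16 are updated in place
  # to match.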
def ReadRangeSet(self, ranges):
return [d for d in self._GetRangeData(ranges)]
def TotalSha1(self, include_clobbered_blocks=False):
"""Return the SHA-1 hash of all data in the 'care' regions.
If include_clobbered_blocks is True, it returns the hash including the
clobbered_blocks."""
ranges = self.care_map
if not include_clobbered_blocks:
ranges = ranges.subtract(self.clobbered_blocks)
h = sha1()
for d in self._GetRangeData(ranges):
h.update(d)
return h.hexdigest()
def _GetRangeData(self, ranges):
"""Generator that produces all the image data in 'ranges'. The
number of individual pieces returned is arbitrary (and in
particular is not necessarily equal to the number of ranges in
'ranges'.
This generator is stateful -- it depends on the open file object
contained in this SparseImage, so you should not try to run two
instances of this generator on the same object simultaneously."""
f = self.simg_f
for s, e in ranges:
to_read = e - s
idx = bisect.bisect_right(self.offset_index, s) - 1
chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
# for the first chunk we may be starting partway through it.
remain = chunk_len - (s - chunk_start)
this_read = min(remain, to_read)
if filepos is not None:
p = filepos + ((s - chunk_start) * self.blocksize)
f.seek(p, os.SEEK_SET)
yield f.read(this_read * self.blocksize)
else:
yield fill_data * (this_read * (self.blocksize >> 2))
to_read -= this_read
while to_read > 0:
# continue with following chunks if this range spans multiple chunks.
idx += 1
chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
this_read = min(chunk_len, to_read)
if filepos is not None:
f.seek(filepos, os.SEEK_SET)
yield f.read(this_read * self.blocksize)
else:
yield fill_data * (this_read * (self.blocksize >> 2))
to_read -= this_read
def LoadFileBlockMap(self, fn, clobbered_blocks):
remaining = self.care_map
self.file_map = out = {}
with open(fn) as f:
for line in f:
fn, ranges = line.split(None, 1)
ranges = rangelib.RangeSet.parse(ranges)
out[fn] = ranges
assert ranges.size() == ranges.intersect(remaining).size()
# Currently we assume that blocks in clobbered_blocks are not part of
# any file.
assert not clobbered_blocks.overlaps(ranges)
remaining = remaining.subtract(ranges)
remaining = remaining.subtract(clobbered_blocks)
# For all the remaining blocks in the care_map (ie, those that
# aren't part of the data for any file nor part of the clobbered_blocks),
# divide them into blocks that are all zero and blocks that aren't.
# (Zero blocks are handled specially because (1) there are usually
# a lot of them and (2) bsdiff handles files with long sequences of
# repeated bytes especially poorly.)
zero_blocks = []
nonzero_blocks = []
    reference = b'\x00' * self.blocksize
# Workaround for bug 23227672. For squashfs, we don't have a system.map. So
# the whole system image will be treated as a single file. But for some
# unknown bug, the updater will be killed due to OOM when writing back the
# patched image to flash (observed on lenok-userdebug MEA49). Prior to
# getting a real fix, we evenly divide the non-zero blocks into smaller
# groups (currently 1024 blocks or 4MB per group).
# Bug: 23227672
MAX_BLOCKS_PER_GROUP = 1024
nonzero_groups = []
f = self.simg_f
for s, e in remaining:
for b in range(s, e):
idx = bisect.bisect_right(self.offset_index, b) - 1
chunk_start, _, filepos, fill_data = self.offset_map[idx]
if filepos is not None:
filepos += (b - chunk_start) * self.blocksize
f.seek(filepos, os.SEEK_SET)
data = f.read(self.blocksize)
else:
if fill_data == reference[:4]: # fill with all zeros
data = reference
else:
data = None
if data == reference:
zero_blocks.append(b)
zero_blocks.append(b + 1)
else:
nonzero_blocks.append(b)
nonzero_blocks.append(b + 1)
if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
nonzero_groups.append(nonzero_blocks)
# Clear the list.
nonzero_blocks = []
if nonzero_blocks:
nonzero_groups.append(nonzero_blocks)
nonzero_blocks = []
assert zero_blocks or nonzero_groups or clobbered_blocks
if zero_blocks:
out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
if nonzero_groups:
for i, blocks in enumerate(nonzero_groups):
out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
if clobbered_blocks:
out["__COPY"] = clobbered_blocks
def ResetFileMap(self):
"""Throw away the file map and treat the entire image as
undifferentiated data."""
self.file_map = {"__DATA": self.care_map}

485
utils.py Normal file
View File

@ -0,0 +1,485 @@
from __future__ import print_function
import struct
from os.path import exists
import os, errno, tempfile
import common
import blockimgdiff
import sparse_img
from threading import Thread
from random import randint, choice
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
# -----
# ====================================================
# FUNCTION: sdat2img img2sdat
# AUTHORS: xpirt - luxi78 - howellzhu
# DATE: 2018-10-27 10:33:21 CEST | 2018-05-25 12:19:12 CEST
# ====================================================
# -----
# ----VALUES
from os import getcwd
from lpunpack import SparseImage
elocal = getcwd()
dn = None
formats = ([b'PK', "zip"], [b'OPPOENCRYPT!', "ozip"], [b'7z', "7z"], [b'\x53\xef', 'ext', 1080],
[b'\x3a\xff\x26\xed', "sparse"], [b'\xe2\xe1\xf5\xe0', "erofs", 1024], [b"CrAU", "payload"],
[b"AVB0", "vbmeta"], [b'\xd7\xb7\xab\x1e', "dtbo"],
[b'\xd0\x0d\xfe\xed', "dtb"], [b"MZ", "exe"], [b".ELF", 'elf'],
[b"ANDROID!", "boot"], [b"VNDRBOOT", "vendor_boot"],
[b'AVBf', "avb_foot"], [b'BZh', "bzip2"],
[b'CHROMEOS', 'chrome'], [b'\x1f\x8b', "gzip"],
[b'\x1f\x9e', "gzip"], [b'\x02\x21\x4c\x18', "lz4_legacy"],
[b'\x03\x21\x4c\x18', 'lz4'], [b'\x04\x22\x4d\x18', 'lz4'],
[b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\x03', "zopfli"], [b'\xfd7zXZ', 'xz'],
[b']\x00\x00\x00\x04\xff\xff\xff\xff\xff\xff\xff\xff', 'lzma'], [b'\x02!L\x18', 'lz4_lg'],
[b'\x89PNG', 'png'], [b"LOGO!!!!", 'logo'])
# ----DEFS
class aesencrypt:
@staticmethod
def encrypt(key, file_path, outfile):
cipher = AES.new(key.encode("utf-8"), AES.MODE_ECB)
with open(outfile, "wb") as f, open(file_path, 'rb') as fd:
f.write(cipher.encrypt(pad(fd.read(), AES.block_size)))
@staticmethod
def decrypt(key, file_path, outfile):
cipher = AES.new(key.encode("utf-8"), AES.MODE_ECB)
with open(file_path, "rb") as f:
data = cipher.decrypt(f.read())
data = data[:-data[-1]]
with open(outfile, "wb") as f:
f.write(data)
def sdat2img(TRANSFER_LIST_FILE, NEW_DATA_FILE, OUTPUT_IMAGE_FILE):
__version__ = '1.2'
print('sdat2img binary - version: {}\n'.format(__version__))
def rangeset(src):
src_set = src.split(',')
num_set = [int(item) for item in src_set]
if len(num_set) != num_set[0] + 1:
print('Error on parsing following data to rangeset:\n{}'.format(src))
return
return tuple([(num_set[i], num_set[i + 1]) for i in range(1, len(num_set), 2)])
def parse_transfer_list_file(path):
with open(TRANSFER_LIST_FILE, 'r') as trans_list:
            # First line in transfer list is the version number
            version = int(trans_list.readline())
            # Second line in transfer list is the total number of blocks we expect to write
            new_blocks = int(trans_list.readline())
            if version >= 2:
                # Third line is how many stash entries are needed simultaneously
                trans_list.readline()
                # Fourth line is the maximum number of blocks that will be stashed simultaneously
                trans_list.readline()
# Subsequent lines are all individual transfer commands
commands = []
for line in trans_list:
line = line.split(' ')
cmd = line[0]
if cmd in ['erase', 'new', 'zero']:
commands.append([cmd, rangeset(line[1])])
else:
# Skip lines starting with numbers, they are not commands anyway
if not cmd[0].isdigit():
print('Command "{}" is not valid.'.format(cmd))
return
return version, new_blocks, commands
version, new_blocks, commands = parse_transfer_list_file(TRANSFER_LIST_FILE)
show = "Android {} detected!\n"
if version == 1:
print(show.format("Lollipop 5.0"))
elif version == 2:
print(show.format("Lollipop 5.1"))
elif version == 3:
print(show.format("Marshmallow 6.x"))
elif version == 4:
print(show.format("Nougat 7.x / Oreo 8.x / Pie 9.x"))
else:
print(f'Unknown Android version {version}!\n')
    # Don't clobber existing files to avoid accidental data loss ('x' mode
    # raises FileExistsError with errno EEXIST, which the branch below handles)
    try:
        output_img = open(OUTPUT_IMAGE_FILE, 'xb')
except IOError as e:
if e.errno == errno.EEXIST:
print('Error: the output file "{}" already exists'.format(e.filename))
print('Remove it, rename it, or choose a different file name.')
return e.errno
else:
raise
new_data_file = open(NEW_DATA_FILE, 'rb')
    BLOCK_SIZE = 4096
    all_block_sets = [i for command in commands for i in command[1]]
    max_file_size = max(pair[1] for pair in all_block_sets) * BLOCK_SIZE
for command in commands:
if command[0] == 'new':
for block in command[1]:
begin = block[0]
end = block[1]
block_count = end - begin
print('Copying {} blocks into position {}...'.format(block_count, begin))
# Position output file
output_img.seek(begin * BLOCK_SIZE)
# Copy one block at a time
while block_count > 0:
output_img.write(new_data_file.read(BLOCK_SIZE))
block_count -= 1
else:
print('Skipping command {}...'.format(command[0]))
# Make file larger if necessary
if output_img.tell() < max_file_size:
output_img.truncate(max_file_size)
output_img.close()
new_data_file.close()
print('Done! Output image: {}'.format(os.path.realpath(output_img.name)))
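# A minimal usage sketch for sdat2img() (file names are placeholders; the
# transfer list and .new.dat come from a block-based OTA package):
#
#     sdat2img("system.transfer.list", "system.new.dat", "system.img")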
def gettype(file) -> str:
if not os.path.exists(file):
return "fne"
def compare(header: bytes, number: int = 0) -> int:
with open(file, 'rb') as f:
f.seek(number)
return f.read(len(header)) == header
    def is_super(fil) -> bool:
        with open(fil, 'rb') as file_:
            buf = bytearray(file_.read(4))
            if len(buf) < 4:
                return False
            file_.seek(0, 0)
            while buf[0] == 0x00:
                buf = bytearray(file_.read(1))
            try:
                file_.seek(-1, 1)
            except (OSError, ValueError):
                return False
            buf += bytearray(file_.read(4))
            return buf[1:] == b'\x67\x44\x6c\x61'
try:
if is_super(file):
return 'super'
except IndexError:
pass
for f_ in formats:
if len(f_) == 2:
if compare(f_[0]):
return f_[1]
elif len(f_) == 3:
if compare(f_[0], f_[2]):
return f_[1]
return "unknow"
def dynamic_list_reader(path):
data = {}
with open(path, 'r', encoding='utf-8') as l_f:
for p in l_f.readlines():
if p[:1] == '#':
continue
tmp = p.strip().split()
if tmp[0] == 'remove_all_groups':
data.clear()
elif tmp[0] == 'add_group':
data[tmp[1]] = {}
data[tmp[1]]['size'] = tmp[2]
data[tmp[1]]['parts'] = []
elif tmp[0] == 'add':
data[tmp[2]]['parts'].append(tmp[1])
else:
print(f"Skip {tmp}")
return data
def generate_dynamic_list(dbfz, size, set_, lb, work):
data = ['# Remove all existing dynamic partitions and groups before applying full OTA', 'remove_all_groups']
with open(work + "dynamic_partitions_op_list", 'w', encoding='utf-8', newline='\n') as d_list:
if set_ == 1:
data.append(f'# Add group {dbfz} with maximum size {size}')
data.append(f'add_group {dbfz} {size}')
elif set_ in [2, 3]:
data.append(f'# Add group {dbfz}_a with maximum size {size}')
data.append(f'add_group {dbfz}_a {size}')
data.append(f'# Add group {dbfz}_b with maximum size {size}')
data.append(f'add_group {dbfz}_b {size}')
for part in lb:
if set_ == 1:
data.append(f'# Add partition {part} to group {dbfz}')
data.append(f'add {part} {dbfz}')
elif set_ in [2, 3]:
data.append(f'# Add partition {part}_a to group {dbfz}_a')
data.append(f'add {part}_a {dbfz}_a')
data.append(f'# Add partition {part}_b to group {dbfz}_b')
data.append(f'add {part}_b {dbfz}_b')
for part in lb:
if set_ == 1:
data.append(f'# Grow partition {part} from 0 to {os.path.getsize(work + part + ".img")}')
data.append(f'resize {part} {os.path.getsize(work + part + ".img")}')
elif set_ in [2, 3]:
data.append(f'# Grow partition {part}_a from 0 to {os.path.getsize(work + part + ".img")}')
data.append(f'resize {part}_a {os.path.getsize(work + part + ".img")}')
d_list.writelines([key + "\n" for key in data])
data.clear()
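# For set_ == 1, generate_dynamic_list() writes an op list shaped like this
# (group name and sizes are illustrative; the '#' comment lines it also
# emits are omitted here):
#
#     remove_all_groups
#     add_group main 9126805504
#     add system main
#     resize system 838860800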
def v_code(num=6) -> str:
    ret = ""
    for _ in range(num):
        digit = randint(0, 9)
        letter = chr(randint(97, 122))  # random lowercase letter
        Letter = chr(randint(65, 90))  # random uppercase letter
        ret += str(choice([digit, letter, Letter]))
    return ret
def qc(file_) -> None:
if not exists(file_):
return
with open(file_, 'r+', encoding='utf-8', newline='\n') as f:
data = f.readlines()
new_data = sorted(set(data), key=data.index)
if len(new_data) == len(data):
print("No need to handle")
return
f.seek(0)
f.truncate()
f.writelines(new_data)
del data, new_data
def cz(func, *args):
Thread(target=func, args=args, daemon=True).start()
def simg2img(path):
    with open(path, 'rb') as fd:
        if SparseImage(fd).check():
            print('Sparse image detected.')
            print('Converting to a non-sparse image...')
            unsparse_file = SparseImage(fd).unsparse()
            print('Result: [ok]')
        else:
            print(f"{path} is not sparse. Skipping!")
            return
    try:
        if os.path.exists(unsparse_file):
            os.remove(path)
            os.rename(unsparse_file, path)
    except Exception as e:
        print(e)
def img2sdat(input_image, out_dir='.', version=None, prefix='system'):
print('img2sdat binary - version: %s\n' % 1.7)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
'''
1. Android Lollipop 5.0
2. Android Lollipop 5.1
3. Android Marshmallow 6.0
4. Android Nougat 7.0/7.1/8.0/8.1
'''
blockimgdiff.BlockImageDiff(sparse_img.SparseImage(input_image, tempfile.mkstemp()[1], '0'), None, version).Compute(
out_dir + '/' + prefix)
print('Done! Output files: %s' % os.path.dirname(prefix))
def findfile(file, dir_) -> str:
    for root, dirs, files in os.walk(dir_, topdown=True):
        if file in files:
            if os.name == 'nt':
                return (root + os.sep + file).replace("\\", '/')
            return root + os.sep + file
def findfolder(dir__, folder_name):
for root, dirnames, filenames in os.walk(dir__):
for dirname in dirnames:
if dirname == folder_name:
return os.path.join(root, dirname).replace("\\", '/')
return None
# ----CLASSES
class jzxs(object):
def __init__(self, master):
self.master = master
self.set()
def set(self):
self.master.geometry('+{}+{}'.format(int(self.master.winfo_screenwidth() / 2 - self.master.winfo_width() / 2),
int(self.master.winfo_screenheight() / 2 - self.master.winfo_height() / 2)))
class vbpatch:
def __init__(self, file_):
self.file = file_
def checkmagic(self):
if os.access(self.file, os.F_OK):
magic = b'AVB0'
with open(self.file, "rb") as f:
buf = f.read(4)
return magic == buf
else:
print("File dose not exist!")
def readflag(self):
if not self.checkmagic():
return False
if os.access(self.file, os.F_OK):
with open(self.file, "rb") as f:
f.seek(123, 0)
flag = f.read(1)
if flag == b'\x00':
return 0 # Verify boot and dm-verity is on
elif flag == b'\x01':
return 1 # Verify boot but dm-verity is off
elif flag == b'\x02':
return 2 # All verity is off
else:
return flag
else:
print("File does not exist!")
def patchvb(self, flag):
if not self.checkmagic():
return False
if os.access(self.file, os.F_OK):
with open(self.file, 'rb+') as f:
f.seek(123, 0)
f.write(flag)
print("Done!")
else:
print("File not Found")
def restore(self):
self.patchvb(b'\x00')
def disdm(self):
self.patchvb(b'\x01')
def disavb(self):
self.patchvb(b'\x02')
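# Typical use (path is a placeholder): vbpatch("vbmeta.img").disavb() sets the
# AVB flags byte at offset 123 to 0x02 (all verification off), disdm() sets
# 0x01 (dm-verity off), and restore() writes 0x00 to re-enable full verified
# boot.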
class DUMPCFG:
blksz = 0x1 << 0xc
headoff = 0x4000
magic = b"LOGO!!!!"
imgnum = 0
imgblkoffs = []
imgblkszs = []
class BMPHEAD(object):
def __init__(self, buf: bytes = None): # Read bytes buf and use this struct to parse
assert buf is not None, f"buf Should be bytes not {type(buf)}"
# print(buf)
self.structstr = "<H6I"
(
self.magic,
self.fsize,
self.reserved,
self.hsize,
self.dib,
self.width,
self.height,
) = struct.unpack(self.structstr, buf)
class XIAOMI_BLKSTRUCT(object):
def __init__(self, buf: bytes):
self.structstr = "2I"
(
self.imgoff,
self.blksz,
) = struct.unpack(self.structstr, buf)
class LOGODUMPER(object):
def __init__(self, img: str, out: str, dir__: str = "pic"):
self.out = out
self.img = img
self.dir = dir__
self.structstr = "<8s"
self.cfg = DUMPCFG()
self.chkimg(img)
def chkimg(self, img: str):
        assert os.access(img, os.F_OK), f"{img} not found!"
with open(img, 'rb') as f:
f.seek(self.cfg.headoff, 0)
self.magic = struct.unpack(
self.structstr, f.read(struct.calcsize(self.structstr))
)[0]
while True:
m = XIAOMI_BLKSTRUCT(f.read(8))
if m.imgoff != 0:
self.cfg.imgblkszs.append(m.blksz << 0xc)
self.cfg.imgblkoffs.append(m.imgoff << 0xc)
self.cfg.imgnum += 1
else:
break
# print(self.magic)
assert self.magic == b"LOGO!!!!", "File does not match xiaomi logo magic!"
print("Xiaomi LOGO!!!! format check pass!")
def unpack(self):
with open(self.img, 'rb') as f:
print("Unpack:\n"
"BMP\tSize\tWidth\tHeight")
for i in range(self.cfg.imgnum):
f.seek(self.cfg.imgblkoffs[i], 0)
bmph = BMPHEAD(f.read(26))
f.seek(self.cfg.imgblkoffs[i], 0)
print("%d\t%d\t%d\t%d" % (i, bmph.fsize, bmph.width, bmph.height))
with open(os.path.join(self.out, "%d.bmp" % i), 'wb') as o:
o.write(f.read(bmph.fsize))
print("\tDone!")
def repack(self):
with open(self.out, 'wb') as o:
off = 0x5
for i in range(self.cfg.imgnum):
print("Write BMP [%d.bmp] at offset 0x%X" % (i, off << 0xc))
with open(os.path.join(self.dir, "%d.bmp" % i), 'rb') as b:
bhead = BMPHEAD(b.read(26))
b.seek(0, 0)
self.cfg.imgblkszs[i] = (bhead.fsize >> 0xc) + 1
self.cfg.imgblkoffs[i] = off
o.seek(off << 0xc)
o.write(b.read(bhead.fsize))
off += self.cfg.imgblkszs[i]
o.seek(self.cfg.headoff)
o.write(self.magic)
for i in range(self.cfg.imgnum):
o.write(struct.pack("<I", self.cfg.imgblkoffs[i]))
o.write(struct.pack("<I", self.cfg.imgblkszs[i]))
print("\tDone!")