initial code for dumping imessages in a reasonable format
This commit is contained in:
268
dump-imessages/iphone-dataprotection/python_scripts/hfs/btree.py
Normal file
268
dump-imessages/iphone-dataprotection/python_scripts/hfs/btree.py
Normal file
@@ -0,0 +1,268 @@
|
||||
from structs import *
|
||||
|
||||
"""
|
||||
Probably buggy
|
||||
HAX, only works on case SENSITIVE
|
||||
"""
|
||||
|
||||
class BTree(object):
|
||||
def __init__(self, file, keyStruct, dataStruct):
|
||||
self.file = file
|
||||
self.keyStruct = keyStruct
|
||||
self.dataStruct = dataStruct
|
||||
block0 = self.file.readBlock(0)
|
||||
btnode = BTNodeDescriptor.parse(block0)
|
||||
assert btnode.kind == kBTHeaderNode
|
||||
self.header = BTHeaderRec.parse(block0[BTNodeDescriptor.sizeof():])
|
||||
assert self.header.keyCompareType == 0 or self.header.keyCompareType == 0 or kHFSBinaryCompare
|
||||
#TODO: do more testing when nodeSize != blockSize
|
||||
self.nodeSize = self.header.nodeSize
|
||||
self.nodesInBlock = file.blockSize / self.header.nodeSize
|
||||
self.blocksForNode = self.header.nodeSize / file.blockSize
|
||||
#print file.blockSize , self.header.nodeSize
|
||||
self.lastRecordNumber = 0
|
||||
type, (hdr, maprec) = self.readBtreeNode(0)
|
||||
assert len(maprec) == self.nodeSize - 256
|
||||
if self.header.totalNodes / 8 > len(maprec):
|
||||
pass #TODO: handle map records
|
||||
self.maprec = maprec
|
||||
|
||||
def isNodeInUse(self, nodeNumber):
|
||||
thisByte = ord(self.maprec[nodeNumber / 8])
|
||||
return (thisByte & (1 << (7 - (nodeNumber % 8)))) != 0
|
||||
|
||||
def readEmptySpace(self):
|
||||
res = ""
|
||||
z = 0
|
||||
for i in xrange(self.header.totalNodes):
|
||||
if not self.isNodeInUse(i):
|
||||
z += 1
|
||||
res += self.readNode(i)
|
||||
assert z == self.header.freeNodes
|
||||
return res
|
||||
|
||||
#convert construct structure to tuple
|
||||
def getComparableKey(self, k):
|
||||
raise Exception("implement in subclass")
|
||||
|
||||
def compareKeys(self, k1, k2):
|
||||
k2 = self.getComparableKey(k2)
|
||||
if k1 == k2:
|
||||
return 0
|
||||
return -1 if k1 < k2 else 1
|
||||
|
||||
def printLeaf(self, key, data):
|
||||
print key, data
|
||||
|
||||
def readNode(self, nodeNumber):
|
||||
node = ""
|
||||
for i in xrange(self.blocksForNode):
|
||||
node += self.file.readBlock(nodeNumber * self.blocksForNode + i)
|
||||
return node
|
||||
|
||||
def readBtreeNode(self, nodeNumber):
|
||||
self.lastnodeNumber = nodeNumber
|
||||
node = self.readNode(nodeNumber)
|
||||
self.lastbtnode = btnode = BTNodeDescriptor.parse(node)
|
||||
|
||||
if btnode.kind == kBTHeaderNode:
|
||||
assert btnode.numRecords == 3
|
||||
end = self.nodeSize - 8 #2*4
|
||||
offsets = Array(btnode.numRecords+1, UBInt16("off")).parse(node[end:])
|
||||
assert offsets[-4] == end
|
||||
hdr = BTHeaderRec.parse(node[BTNodeDescriptor.sizeof():])
|
||||
maprec = node[offsets[-3]:end]
|
||||
return kBTHeaderNode, [hdr, maprec]
|
||||
elif btnode.kind == kBTIndexNode:
|
||||
recs = []
|
||||
offsets = Array(btnode.numRecords, UBInt16("off")).parse(node[-2*btnode.numRecords:])
|
||||
for i in xrange(btnode.numRecords):
|
||||
off = offsets[btnode.numRecords-i-1]
|
||||
k = self.keyStruct.parse(node[off:])
|
||||
off += 2 + k.keyLength
|
||||
k.childNode = UBInt32("nodeNumber").parse(node[off:off+4])
|
||||
recs.append(k)
|
||||
return kBTIndexNode, recs
|
||||
elif btnode.kind == kBTLeafNode:
|
||||
recs = []
|
||||
offsets = Array(btnode.numRecords, UBInt16("off")).parse(node[-2*btnode.numRecords:])
|
||||
for i in xrange(btnode.numRecords):
|
||||
off = offsets[btnode.numRecords-i-1]
|
||||
k = self.keyStruct.parse(node[off:])
|
||||
off += 2 + k.keyLength
|
||||
d = self.dataStruct.parse(node[off:])
|
||||
recs.append((k,d))
|
||||
return kBTLeafNode, recs
|
||||
else:
|
||||
raise Exception("Invalid node type " + str(btnode))
|
||||
|
||||
def search(self, searchKey, node=None):
|
||||
if node == None:
|
||||
node = self.header.rootNode
|
||||
|
||||
type, stuff = self.readBtreeNode(node)
|
||||
if len(stuff) == 0:
|
||||
return None, None
|
||||
|
||||
if type == kBTIndexNode:
|
||||
for i in xrange(len(stuff)):
|
||||
if self.compareKeys(searchKey, stuff[i]) < 0:
|
||||
if i > 0:
|
||||
i = i - 1
|
||||
return self.search(searchKey, stuff[i].childNode)
|
||||
return self.search(searchKey, stuff[len(stuff)-1].childNode)
|
||||
elif type == kBTLeafNode:
|
||||
self.lastRecordNumber = 0
|
||||
for k,v in stuff:
|
||||
res = self.compareKeys(searchKey, k)
|
||||
if res == 0:
|
||||
return k, v
|
||||
if res < 0:
|
||||
return None, None
|
||||
self.lastRecordNumber += 1
|
||||
return None, None
|
||||
|
||||
def traverse(self, node=None, count=0, callback=None):
|
||||
if node == None:
|
||||
node = self.header.rootNode
|
||||
|
||||
type, stuff = self.readBtreeNode(node)
|
||||
|
||||
if type == kBTIndexNode:
|
||||
for i in xrange(len(stuff)):
|
||||
count += self.traverse(stuff[i].childNode, callback=callback)
|
||||
elif type == kBTLeafNode:
|
||||
for k,v in stuff:
|
||||
if callback:
|
||||
callback(k,v)
|
||||
else:
|
||||
self.printLeaf(k, v)
|
||||
count += 1
|
||||
return count
|
||||
|
||||
def traverseLeafNodes(self, callback=None):
|
||||
nodeNumber = self.header.firstLeafNode
|
||||
count = 0
|
||||
while nodeNumber != 0:
|
||||
_, stuff = self.readBtreeNode(nodeNumber)
|
||||
count += len(stuff)
|
||||
for k,v in stuff:
|
||||
if callback:
|
||||
callback(k,v)
|
||||
else:
|
||||
self.printLeaf(k, v)
|
||||
nodeNumber = self.lastbtnode.fLink
|
||||
return count
|
||||
|
||||
#XXX
|
||||
def searchMultiple(self, searchKey, filterKeyFunction=lambda x:False):
|
||||
self.search(searchKey)
|
||||
nodeNumber = self.lastnodeNumber
|
||||
recordNumber = self.lastRecordNumber
|
||||
kv = []
|
||||
while nodeNumber != 0:
|
||||
_, stuff = self.readBtreeNode(nodeNumber)
|
||||
for k,v in stuff[recordNumber:]:
|
||||
if filterKeyFunction(k):
|
||||
kv.append((k,v))
|
||||
else:
|
||||
return kv
|
||||
nodeNumber = self.lastbtnode.fLink
|
||||
recordNumber = 0
|
||||
return kv
|
||||
|
||||
def getLBAsHax(self):
|
||||
nodes = [self.lastnodeNumber]
|
||||
n = self.lastbtnode
|
||||
for i in xrange(2):
|
||||
nodes.append(self.lastbtnode.bLink)
|
||||
self.readBtreeNode(self.lastbtnode.bLink)
|
||||
self.lastbtnode = n
|
||||
for i in xrange(2):
|
||||
nodes.append(self.lastbtnode.fLink)
|
||||
self.readBtreeNode(self.lastbtnode.fLink)
|
||||
res = []
|
||||
for n in nodes:
|
||||
res.append(self.file.getLBAforBlock(n * self.blocksForNode))
|
||||
return res
|
||||
|
||||
class CatalogTree(BTree):
    """B-tree over the catalog file: maps (parentID, name) keys to
    folder/file/thread records. `volume` is kept to resolve symlinks."""
    def __init__(self, file, volume):
        super(CatalogTree,self).__init__(file, HFSPlusCatalogKey, HFSPlusCatalogData)
        self.volume = volume

    def printLeaf(self, k, d):
        # Only show real folder/file records (skip thread records).
        if d.recordType == kHFSPlusFolderRecord or d.recordType == kHFSPlusFileRecord:
            print getString(k)

    def getComparableKey(self, k2):
        # Plain tuple comparison == binary name comparison; case-insensitive
        # volumes would need the full Unicode comparison algorithm:
        #XXX http://dubeiko.com/development/FileSystems/HFSPLUS/tn1150.html#StringComparisonAlgorithm
        return (k2.parentID, getString(k2))

    def searchByCNID(self, cnid):
        """Look up a record by CNID via its thread record (key (cnid, ""))."""
        threadk, threadd = self.search((cnid, ""))
        # The thread record points back at the real (parentID, name) key.
        return self.search((threadd.data.parentID, getString(threadd.data))) if threadd else (None, None)

    def getFolderContents(self, cnid):
        """Return all (key, record) pairs whose parent folder is `cnid`."""
        return self.searchMultiple((cnid, ""), lambda k:k.parentID == cnid)

    def getRecordFromPath(self, path):
        """Resolve an absolute POSIX path to its (key, record) pair.

        Follows symlinks by splicing the link target into the path and
        recursing; returns (None, None) when any component is missing.
        """
        if not path.startswith("/"):
            return None, None
        if path == "/":
            return self.searchByCNID(kHFSRootFolderID)
        parentId=kHFSRootFolderID
        i = 1  # index of the current component in path.split("/")
        k, v = None, None
        for p in path.split("/")[1:]:
            if p == "":
                # Trailing slash: stop at the last real component.
                break
            k,v = self.search((parentId, p))
            if (k,v) == (None, None):
                return None, None

            if v.recordType == kHFSPlusFolderRecord:
                # Descend into the folder for the next component.
                parentId = v.data.folderID
            elif v.recordType == kHFSPlusFileRecord and is_symlink(v.data):
                # Symlink: read its target and restart resolution on the
                # rewritten path.
                linkdata = self.volume.readFileByRecord(v)
                print "symlink %s => %s" % (p, linkdata)
                if not linkdata:
                    return None, None
                t = path.split("/")
                t[i] = linkdata
                newpath = "/".join(t)
                return self.getRecordFromPath(newpath)
            else:
                # Regular file (or other record): path cannot go deeper.
                break
            i += 1
        return k,v
|
||||
|
||||
class ExtentsOverflowTree(BTree):
    """B-tree holding the overflow extents of heavily fragmented files."""

    def __init__(self, file):
        super(ExtentsOverflowTree, self).__init__(file, HFSPlusExtentKey, HFSPlusExtentRecord)

    def getComparableKey(self, k2):
        # Extent keys order by (file id, fork type, starting block).
        fileID = k2.fileID
        forkType = k2.forkType
        startBlock = k2.startBlock
        return (fileID, forkType, startBlock)

    def searchExtents(self, fileID, forkType, startBlock):
        # Delegate to the generic B-tree search with a comparable tuple key.
        key = (fileID, forkType, startBlock)
        return self.search(key)
|
||||
|
||||
class AttributesTree(BTree):
    """B-tree over the attributes file: maps (fileID, xattr name) to data."""
    def __init__(self, file):
        super(AttributesTree,self).__init__(file, HFSPlusAttrKey, HFSPlusAttrData)

    def printLeaf(self, k, d):
        # Dump one xattr record: owner CNID, name, hex-encoded value.
        print k.fileID, getString(k), d.data.encode("hex")

    def getComparableKey(self, k2):
        # Attribute keys order by (owner CNID, attribute name).
        return (k2.fileID, getString(k2))

    def searchXattr(self, fileID, name):
        """Return the raw value of xattr `name` on `fileID`, or None."""
        k,v = self.search((fileID, name))
        return v.data if v else None

    def getAllXattrs(self, fileID):
        """Return a dict of all xattrs (name -> raw value) for `fileID`."""
        res = {}
        # (fileID, "") sorts before every named attribute of that file.
        for k,v in self.searchMultiple((fileID, ""), lambda k:k.fileID == fileID):
            res[getString(k)] = v.data
        return res
|
||||
220
dump-imessages/iphone-dataprotection/python_scripts/hfs/emf.py
Normal file
220
dump-imessages/iphone-dataprotection/python_scripts/hfs/emf.py
Normal file
@@ -0,0 +1,220 @@
|
||||
from construct import Struct, ULInt16, ULInt32, String
|
||||
from construct.macros import ULInt64, Padding, If
|
||||
from crypto.aes import AESencryptCBC, AESdecryptCBC
|
||||
from hfs import HFSVolume, HFSFile
|
||||
from keystore.keybag import Keybag
|
||||
from structs import HFSPlusVolumeHeader, kHFSPlusFileRecord, getString, \
|
||||
kHFSRootParentID
|
||||
from util import search_plist
|
||||
from util.bruteforce import loadKeybagFromVolume
|
||||
import hashlib
|
||||
import os
|
||||
import plistlib
|
||||
import struct
|
||||
|
||||
"""
|
||||
iOS >= 4 raw images
|
||||
http://opensource.apple.com/source/xnu/xnu-1699.22.73/bsd/hfs/hfs_cprotect.c
|
||||
http://opensource.apple.com/source/xnu/xnu-1699.22.73/bsd/sys/cprotect.h
|
||||
"""
|
||||
|
||||
# On-disk layout of the root folder's com.apple.system.cprotect xattr
# (content-protection version info; see xnu bsd/sys/cprotect.h).
cp_root_xattr = Struct("cp_root_xattr",
    ULInt16("major_version"),
    ULInt16("minor_version"),
    ULInt64("flags"),
    ULInt32("reserved1"),
    ULInt32("reserved2"),
    ULInt32("reserved3"),
    ULInt32("reserved4")
)

# Per-file com.apple.system.cprotect xattr: protection class + wrapped key.
cprotect_xattr = Struct("cprotect_xattr",
    ULInt16("xattr_major_version"),
    ULInt16("xattr_minor_version"),
    ULInt32("flags"),
    ULInt32("persistent_class"),
    ULInt32("key_size"),
    # Version >= 4 inserts 20 extra bytes before the wrapped key.
    If(lambda ctx: ctx["xattr_major_version"] >= 4, Padding(20)),
    String("persistent_key", length=lambda ctx: ctx["key_size"])
)
# Protection class id meaning "no per-file protection".
NSProtectionNone = 4

# Data-protection class ids -> API constant names.
PROTECTION_CLASSES={
    1:"NSFileProtectionComplete",
    2:"NSFileProtectionCompleteUnlessOpen",
    3:"NSFileProtectionCompleteUntilFirstUserAuthentication",
    4:"NSFileProtectionNone",
    5:"NSFileProtectionRecovery?"
}

#HAX: flags set in finderInfo[3] to tell if the image was already decrypted
FLAG_DECRYPTING = 0x454d4664 #EMFd big endian
FLAG_DECRYPTED = 0x454d4644 #EMFD big endian
|
||||
|
||||
class EMFFile(HFSFile):
    """HFSFile whose blocks are decrypted on the fly with a per-file key
    (iOS data protection on top of the EMF whole-volume layer)."""
    def __init__(self, volume, hfsplusfork, fileID, filekey, deleted=False):
        super(EMFFile,self).__init__(volume, hfsplusfork, fileID, deleted)
        self.filekey = filekey
        self.ivkey = None
        # File offset counter used to derive per-0x1000-chunk IVs (v4 path).
        self.decrypt_offset = 0
        if volume.cp_major_version == 4:
            # cprotect v4 derives chunk IVs from a SHA1 of the file key.
            self.ivkey = hashlib.sha1(filekey).digest()[:16]

    def processBlock(self, block, lba):
        """Strip the EMF volume layer, then the per-file encryption.

        NOTE(review): encrypting with the volume EMF key here apparently
        undoes a transparent EMF decryption applied when the raw data was
        written - confirm against xnu hfs_cprotect.c.
        """
        iv = self.volume.ivForLBA(lba)
        ciphertext = AESencryptCBC(block, self.volume.emfkey, iv)
        if not self.ivkey:
            # cprotect v2: whole-block CBC with the LBA-derived IV.
            clear = AESdecryptCBC(ciphertext, self.filekey, iv)
        else:
            # cprotect v4: 0x1000-byte chunks, IV derived from the running
            # file offset and encrypted with the SHA1-derived ivkey.
            clear = ""
            for i in xrange(len(block)/0x1000):
                iv = self.volume.ivForLBA(self.decrypt_offset, False)
                iv = AESencryptCBC(iv, self.ivkey)
                clear += AESdecryptCBC(ciphertext[i*0x1000:(i+1)*0x1000], self.filekey,iv)
                self.decrypt_offset += 0x1000
        return clear

    def decryptFile(self):
        """Decrypt every block of the file IN PLACE on the volume."""
        self.decrypt_offset = 0
        bs = self.volume.blockSize
        for extent in self.extents:
            for i in xrange(extent.blockCount):
                lba = extent.startBlock+i
                data = self.volume.readBlock(lba)
                # Skip short reads (e.g. past end of device).
                if len(data) == bs:
                    clear = self.processBlock(data, lba)
                    self.volume.writeBlock(lba, clear)
|
||||
|
||||
|
||||
class EMFVolume(HFSVolume):
    """HFS+ volume with iOS EMF encryption support.

    Needs a plist keyfile (`device_infos`) providing the EMF key, the data
    volume LBA offset and the keybag used to unwrap per-file keys.
    """
    def __init__(self, bdev, device_infos, **kwargs):
        super(EMFVolume,self).__init__(bdev, **kwargs)
        volumeid = self.volumeID().encode("hex")

        if not device_infos:
            # No keyfile supplied: look for a plist matching this volume's
            # UUID next to the image file.
            dirname = os.path.dirname(bdev.filename)
            device_infos = search_plist(dirname, {"dataVolumeUUID":volumeid})
            if not device_infos:
                raise Exception("Missing keyfile")
        try:
            self.emfkey = None
            if device_infos.has_key("EMF"):
                self.emfkey = device_infos["EMF"].decode("hex")
            self.lbaoffset = device_infos["dataVolumeOffset"]
            self.keybag = Keybag.createWithPlist(device_infos)
        except:
            raise #Exception("Invalid keyfile")

        # The root folder's cprotect xattr carries the protection version.
        rootxattr = self.getXattr(kHFSRootParentID, "com.apple.system.cprotect")
        # finderInfo[3] is (ab)used as a marker for already-decrypted images.
        self.decrypted = (self.header.finderInfo[3] == FLAG_DECRYPTED)
        self.cp_major_version = None
        self.cp_root = None
        if rootxattr == None:
            print "(No root com.apple.system.cprotect xattr)"
        else:
            self.cp_root = cp_root_xattr.parse(rootxattr)
            ver = self.cp_root.major_version
            # version 2 -> iOS 4, anything else (4) -> iOS 5.
            print "cprotect version : %d (iOS %d)" % (ver, 4 + int(ver != 2))
            assert self.cp_root.major_version == 2 or self.cp_root.major_version == 4
            self.cp_major_version = self.cp_root.major_version
        self.keybag = loadKeybagFromVolume(self, device_infos)

    def ivForLBA(self, lba, add=True):
        """Derive the 16-byte AES IV for a block number.

        Applies the data-volume LBA offset (unless add=False), then repeats
        an xor/shift mix of the 32-bit LBA, packing 4 little-endian words.
        """
        iv = ""
        if add:
            lba = lba + self.lbaoffset
        lba &= 0xffffffff
        for _ in xrange(4):
            if (lba & 1):
                lba = 0x80000061 ^ (lba >> 1);
            else:
                lba = lba >> 1;
            iv += struct.pack("<L", lba)
        return iv

    def getFileKeyForCprotect(self, cp):
        """Unwrap the per-file key from a raw cprotect xattr value."""
        if self.cp_major_version == None:
            # Never saw the root xattr: infer the version from this xattr.
            self.cp_major_version = struct.unpack("<H", cp[:2])[0]
        cprotect = cprotect_xattr.parse(cp)
        return self.keybag.unwrapKeyForClass(cprotect.persistent_class, cprotect.persistent_key)

    def getFileKeyForFileId(self, fileid):
        """Unwrap the per-file key for a CNID, or None if no cprotect xattr."""
        cprotect = self.getXattr(fileid, "com.apple.system.cprotect")
        if cprotect == None:
            return None
        return self.getFileKeyForCprotect(cprotect)

    def readFile(self, path, outFolder="./", returnString=False):
        """Like HFSVolume.readFile, but decrypts with the per-file key."""
        k,v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        cprotect = self.getXattr(v.data.fileID, "com.apple.system.cprotect")
        # Fall back to plain reads when the file has no cprotect xattr, the
        # volume has no protection info, or it was already decrypted.
        if cprotect == None or not self.cp_root or self.decrypted:
            #print "cprotect attr not found, reading normally"
            return super(EMFVolume, self).readFile(path, returnString=returnString)
        filekey = self.getFileKeyForCprotect(cprotect)
        if not filekey:
            print "Cannot unwrap file key for file %s protection_class=%d" % (path, cprotect_xattr.parse(cprotect).persistent_class)
            return
        f = EMFFile(self, v.data.dataFork, v.data.fileID, filekey)
        if returnString:
            return f.readAllBuffer()
        f.readAll(outFolder + os.path.basename(path))
        return True

    def flagVolume(self, flag):
        # Persist the decryption-progress marker in finderInfo[3].
        self.header.finderInfo[3] = flag
        h = HFSPlusVolumeHeader.build(self.header)
        return self.bdev.write(0x400, h)

    def decryptAllFiles(self):
        """Decrypt every file IN PLACE on the device (destructive).

        Refuses to run if a previous pass was interrupted or completed,
        based on the finderInfo[3] marker.
        """
        if self.header.finderInfo[3] == FLAG_DECRYPTING:
            print "Volume is half-decrypted, aborting (finderInfo[3] == FLAG_DECRYPTING)"
            return
        elif self.header.finderInfo[3] == FLAG_DECRYPTED:
            print "Volume already decrypted (finderInfo[3] == FLAG_DECRYPTED)"
            return
        self.failedToGetKey = []
        self.notEncrypted = []
        self.decryptedCount = 0
        self.flagVolume(FLAG_DECRYPTING)
        self.catalogTree.traverseLeafNodes(callback=self.decryptFile)
        self.flagVolume(FLAG_DECRYPTED)
        print "Decrypted %d files" % self.decryptedCount
        print "Failed to unwrap keys for : ", self.failedToGetKey
        print "Not encrypted files : %d" % len(self.notEncrypted)

    def decryptFile(self, k,v):
        # Catalog-tree callback: decrypt one file record in place.
        if v.recordType == kHFSPlusFileRecord:
            filename = getString(k).encode("utf-8")
            cprotect = self.getXattr(v.data.fileID, "com.apple.system.cprotect")
            if not cprotect:
                self.notEncrypted.append(filename)
                return
            fk = self.getFileKeyForCprotect(cprotect)
            if not fk:
                self.failedToGetKey.append(filename)
                return
            print "Decrypting", filename
            f = EMFFile(self, v.data.dataFork, v.data.fileID, fk)
            f.decryptFile()
            self.decryptedCount += 1

    def list_protected_files(self):
        """Print all files grouped by (non-default) protection class."""
        self.protected_dict = {}
        self.xattrTree.traverseLeafNodes(callback=self.inspectXattr)
        for k in self.protected_dict.keys():
            print k
            for v in self.protected_dict[k]: print "\t",v
            print ""

    def inspectXattr(self, k, v):
        # Attributes-tree callback used by list_protected_files.
        if getString(k) == "com.apple.system.cprotect" and k.fileID != kHFSRootParentID:
            c = cprotect_xattr.parse(v.data)
            if c.persistent_class != NSProtectionNone:
                #desc = "%d %s" % (k.fileID, self.getFullPath(k.fileID))
                desc = "%s" % self.getFullPath(k.fileID)
                self.protected_dict.setdefault(PROTECTION_CLASSES.get(c.persistent_class),[]).append(desc)
                #print k.fileID, self.getFullPath(k.fileID), PROTECTION_CLASSES.get(c.persistent_class)
|
||||
315
dump-imessages/iphone-dataprotection/python_scripts/hfs/hfs.py
Normal file
315
dump-imessages/iphone-dataprotection/python_scripts/hfs/hfs.py
Normal file
@@ -0,0 +1,315 @@
|
||||
from btree import AttributesTree, CatalogTree, ExtentsOverflowTree
|
||||
from structs import *
|
||||
from util import write_file
|
||||
from util.bdev import FileBlockDevice
|
||||
import datetime
|
||||
import hashlib
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
import zlib
|
||||
|
||||
def hfs_date(t):
    """Convert an HFS+ timestamp (seconds since 1904-01-01) to a datetime."""
    hfs_epoch = datetime.datetime(1904, 1, 1)
    return hfs_epoch + datetime.timedelta(seconds=t)
|
||||
|
||||
class HFSFile(object):
    """Read/write view of one fork of a file on an HFSVolume.

    All extents (including overflow extents) are resolved once at
    construction; logical block numbers are then translated to volume LBAs.
    """
    def __init__(self, volume, hfsplusfork, fileID, deleted=False):
        self.volume = volume
        self.blockSize = volume.blockSize
        self.fileID = fileID
        self.totalBlocks = hfsplusfork.totalBlocks
        self.logicalSize = hfsplusfork.logicalSize
        self.extents = volume.getAllExtents(hfsplusfork, fileID)
        # deleted=True disables the "block in use" sanity check (carving).
        self.deleted = deleted

    def readAll(self, outputfile, truncate=True):
        """Dump the whole fork to `outputfile`, truncated to logical size."""
        f = open(outputfile, "wb")
        for i in xrange(self.totalBlocks):
            f.write(self.readBlock(i))
        if truncate:
            f.truncate(self.logicalSize)
        f.close()

    def readAllBuffer(self, truncate=True):
        """Return the whole fork as a string, truncated to logical size."""
        r = ""
        for i in xrange(self.totalBlocks):
            r += self.readBlock(i)
        if truncate:
            r = r[:self.logicalSize]
        return r

    def processBlock(self, block, lba):
        # Hook for subclasses (EMFFile decrypts here); identity by default.
        return block

    def readBlock(self, n):
        """Read logical block `n` of the fork after extent translation.

        Out-of-range reads return a recognizable filler block; a block past
        the last extent returns "".
        """
        bs = self.volume.blockSize
        if n*bs > self.logicalSize:
            return "BLOCK OUT OF BOUNDS" + "\xFF" * (bs - len("BLOCK OUT OF BOUNDS"))
        bc = 0
        for extent in self.extents:
            bc += extent.blockCount
            if n < bc:
                lba = extent.startBlock+(n-(bc-extent.blockCount))
                # The allocation file itself is exempt from the in-use check.
                if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
                    print "FAIL, block %x not marked as used" % n
                return self.processBlock(self.volume.readBlock(lba), lba)
        return ""

    def getLBAforBlock(self, n):
        # Translate a logical block index to a volume LBA (None past EOF).
        bc = 0
        for extent in self.extents:
            bc += extent.blockCount
            if n < bc:
                return extent.startBlock+(n-(bc-extent.blockCount))

    def writeBlock(self, n, data):
        """Overwrite logical block `n` in place on the underlying device."""
        bs = self.volume.blockSize
        if n*bs > self.logicalSize:
            raise Exception("writeBlock, out of bounds %d" % n)
        bc = 0
        for extent in self.extents:
            bc += extent.blockCount
            if n < bc:
                lba = extent.startBlock+(n-(bc-extent.blockCount))
                self.volume.writeBlock(lba, data)
                return
|
||||
|
||||
|
||||
class HFSCompressedResourceFork(HFSFile):
    """Resource fork holding decmpfs compressed data (compression type 4:
    zlib chunks stored in the resource fork)."""
    def __init__(self, volume, hfsplusfork, fileID):
        super(HFSCompressedResourceFork,self).__init__(volume, hfsplusfork, fileID)
        block0 = self.readBlock(0)
        self.header = HFSPlusCmpfRsrcHead.parse(block0)
        print self.header
        # Table of compressed chunk (offset, size) entries.
        self.blocks = HFSPlusCmpfRsrcBlockHead.parse(block0[self.header.headerSize:])
        print "HFSCompressedResourceFork numBlocks:", self.blocks.numBlocks

    #HAX, readblock not implemented
    def readAllBuffer(self):
        """Return the decompressed contents (zlib-inflates each chunk)."""
        buff = super(HFSCompressedResourceFork, self).readAllBuffer()
        r = ""
        # Chunk offsets are relative to just past the 4-byte block count.
        base = self.header.headerSize + 4
        for b in self.blocks.HFSPlusCmpfRsrcBlock:
            r += zlib.decompress(buff[base+b.offset:base+b.offset+b.size])
        return r
|
||||
|
||||
class HFSVolume(object):
    """Parser for an HFS+/HFSX volume on top of a block-device-like object.

    `bdev` must expose readBlock()/writeBlock()/setBlockSize().
    """
    def __init__(self, bdev):
        self.bdev = bdev

        try:
            data = self.bdev.readBlock(0)
            # The volume header lives at offset 0x400; signature is
            # 0x482B ('H+', HFS+) or 0x4858 ('HX', HFSX).
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise
            #raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize
        self.bdev.setBlockSize(self.blockSize)

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        # The special files are described by forks in the volume header.
        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)

    def readBlock(self, b):
        # Raw volume-block read, delegated to the block device.
        return self.bdev.readBlock(b)

    def writeBlock(self, lba, data):
        # Raw volume-block write, delegated to the block device.
        return self.bdev.writeBlock(lba, data)

    def volumeID(self):
        # The volume UUID occupies the last two finderInfo words.
        return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])

    def isBlockInUse(self, block):
        # Allocation bitmap: one bit per allocation block, MSB first.
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        """Yield (block number, contents) for every free allocation block.

        NOTE(review): relies on self.read(offset, size), which is not
        defined in this class - confirm where it is expected to come from.
        """
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i*self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
        """Look up overflow extents for a file starting at `startBlock`."""
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        """Return the raw value of xattr `name` on `fileID`, or None."""
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        """Return the (catalog key, record) for an absolute path."""
        return self.catalogTree.getRecordFromPath(path)

    def getFileIDByPath(self, path):
        """Return the CNID (folderID or fileID) for a path, or None."""
        key, record = self.catalogTree.getRecordFromPath(path)
        if not record:
            return
        if record.recordType == kHFSPlusFolderRecord:
            return record.data.folderID
        return record.data.fileID

    def listFolderContents(self, path):
        """Print the CNID and name of every entry in the folder at `path`."""
        k,v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k,v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                print v.data.folderID, getString(k).replace("\r","") + "/"
            elif v.recordType == kHFSPlusFileRecord:
                print v.data.fileID, getString(k)

    def ls(self, path):
        """Return {name: record data} for the folder at `path` (None if not
        a folder)."""
        k,v = self.catalogTree.getRecordFromPath(path)
        return self._ls(k, v)

    def _ls(self, k, v):
        # Shared implementation of ls(): build the name -> data mapping.
        res = {}

        if not k or v.recordType != kHFSPlusFolderRecord:
            return None
        for k,v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                res[getString(k).replace("\r","") + "/"] = v.data
            elif v.recordType == kHFSPlusFileRecord:
                res[getString(k)] = v.data
        return res

    def listXattrs(self, path):
        """Return all xattrs of the file or folder at `path`."""
        k,v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    def readFileByRecord(self, record):
        """Return a file's contents given its catalog record, transparently
        handling decmpfs-compressed files (types 1, 3 and 4)."""
        assert record.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(record.data.fileID, "com.apple.decmpfs")
        data = None
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            if decmpfs.compression_type == 1:
                # Type 1: data stored uncompressed inside the xattr.
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                # Type 3: zlib data in the xattr (or raw if already full size).
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    return xattr[16:]
                return zlib.decompress(xattr[16:])
            elif decmpfs.compression_type == 4:
                # Type 4: zlib chunks in the resource fork.
                f = HFSCompressedResourceFork(self, record.data.resourceFork, record.data.fileID)
                data = f.readAllBuffer()
                return data

        f = HFSFile(self, record.data.dataFork, record.data.fileID)
        return f.readAllBuffer()

    #TODO: returnString compress
    def readFile(self, path, outFolder="./", returnString=False):
        """Extract the file at `path` to `outFolder` (or return it as a
        string), handling decmpfs compression like readFileByRecord."""
        k,v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)

            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    z = xattr[16:]
                else:
                    z = zlib.decompress(xattr[16:])
                open(outFolder + os.path.basename(path), "wb").write(z)
                return
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID)
                z = f.readAllBuffer()
                open(outFolder + os.path.basename(path), "wb").write(z)
                return z

        f = HFSFile(self, v.data.dataFork, v.data.fileID)
        if returnString:
            return f.readAllBuffer()
        else:
            f.readAll(outFolder + os.path.basename(path))

    def readJournal(self):
        """Return the raw journal contents (read via the /.journal file)."""
        #jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        #jib = JournalInfoBlock.parse(jb)
        #return self.read(jib.offset,jib.size)
        return self.readFile("/.journal", returnString=True)

    def listAllFileIds(self):
        """Return a dict whose keys are the CNIDs of all catalog files."""
        self.fileids={}
        self.catalogTree.traverseLeafNodes(callback=self.grabFileId)
        return self.fileids

    def grabFileId(self, k,v):
        # Catalog-tree callback used by listAllFileIds.
        if v.recordType == kHFSPlusFileRecord:
            self.fileids[v.data.fileID] = True

    def getFileRecordForFileID(self, fileID):
        """Return the catalog record for a CNID (via its thread record)."""
        k,v = self.catalogTree.searchByCNID(fileID)
        return v

    def getFullPath(self, fileID):
        """Rebuild the absolute path of a CNID by walking thread records up
        to the root folder."""
        k,v = self.catalogTree.search((fileID, ""))
        if not k:
            print "File ID %d not found" % fileID
            return ""
        p = getString(v.data)
        while k:
            k,v = self.catalogTree.search((v.data.parentID, ""))
            if k.parentID == kHFSRootFolderID:
                break
            p = getString(v.data) + "/" + p

        return "/" + p

    def getFileRecordForPath(self, path):
        """Return the record data (not the key) for a path, or None."""
        k,v = self.catalogTree.getRecordFromPath(path)
        if not k:
            return
        return v.data

    def getAllExtents(self, hfsplusfork, fileID):
        """Return all extents of a fork: the 8 inline ones plus any found
        in the extents overflow tree."""
        b = 0
        extents = []
        for extent in hfsplusfork.HFSPlusExtentDescriptor:
            extents.append(extent)
            b += extent.blockCount
        # Keep pulling overflow records until the block count matches.
        while b != hfsplusfork.totalBlocks:
            k,v = self.getExtentsOverflowForFile(fileID, b)
            if not v:
                print "extents overflow missing, startblock=%d" % b
                break
            for extent in v:
                extents.append(extent)
                b += extent.blockCount
        return extents

    def dohashFiles(self, k,v):
        # Catalog-tree callback: print a file's name and SHA1 of its data.
        if v.recordType == kHFSPlusFileRecord:
            filename = getString(k)
            f = HFSFile(self, v.data.dataFork, v.data.fileID)
            print filename, hashlib.sha1(f.readAllBuffer()).hexdigest()

    def hashFiles(self):
        """Print the SHA1 of every file's data fork on the volume."""
        self.catalogTree.traverseLeafNodes(callback=self.dohashFiles)
|
||||
|
||||
if __name__ == "__main__":
    # Quick smoke test against a local ramdisk image.
    # NOTE(review): HFSVolume.__init__ above takes a block device object and
    # no `offset` kwarg, so this call looks stale - presumably it should be
    # HFSVolume(FileBlockDevice("myramdisk.dmg", offset=0x40)); confirm.
    v = HFSVolume("myramdisk.dmg",offset=0x40)
    v.listFolderContents("/")
    print v.readFile("/usr/local/share/restore/imeisv_svn.plist")
    print v.listXattrs("/usr/local/share/restore/imeisv_svn.plist")
|
||||
@@ -0,0 +1,152 @@
|
||||
from crypto.aes import AESencryptCBC, AESdecryptCBC
|
||||
from emf import cprotect_xattr, EMFFile
|
||||
from structs import *
|
||||
from util import write_file, sizeof_fmt
|
||||
import hashlib
|
||||
|
||||
"""
|
||||
Implementation of the following paper :
|
||||
Using the HFS+ Journal For Deleted File Recovery. Aaron Burghardt, Adam Feldman. DFRWS 2008
|
||||
http://www.dfrws.org/2008/proceedings/p76-burghardt.pdf
|
||||
http://www.dfrws.org/2008/proceedings/p76-burghardt_pres.pdf
|
||||
"""
|
||||
|
||||
def carveBtreeNode(node, kClass, dClass):
    """Try to parse `node` as a B-tree leaf node and return its records.

    `kClass` / `dClass` are the construct Structs for record keys and data.
    Returns a list of (key, data) tuples, or [] when the buffer is not a
    parseable leaf node. Carving feeds arbitrary journal sectors in here,
    so parse failures are the common case and are silently ignored.
    """
    try:
        btnode = BTNodeDescriptor.parse(node)

        if btnode.kind == kBTLeafNode:
            off = BTNodeDescriptor.sizeof()
            recs = []
            # Record offsets are big-endian u16s at the end of the node,
            # stored last-record-first.
            offsets = Array(btnode.numRecords, UBInt16("off")).parse(node[-2*btnode.numRecords:])
            for i in xrange(btnode.numRecords):
                off = offsets[btnode.numRecords-i-1]
                k = kClass.parse(node[off:])
                off += 2 + k.keyLength  # skip 2-byte keyLength field + key
                d = dClass.parse(node[off:])
                recs.append((k,d))
            return recs
        return []
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; only parse/index errors should be ignored here.
        return []
|
||||
|
||||
"""
|
||||
for standard HFS volumes
|
||||
"""
|
||||
def carveHFSVolumeJournal(volume):
    """Scan a (non-encrypted) HFS volume journal for deleted file records.

    Tries to parse a catalog leaf node at every journal sector boundary and
    keeps file records whose CNID is no longer in the catalog and whose
    first data block is unallocated. Returns a list of (name, record).
    """
    journal = volume.readJournal()
    hdr = journal_header.parse(journal)
    sector_size = hdr.jhdr_size
    nodeSize = volume.catalogTree.nodeSize

    f={}
    for i in xrange(0,len(journal), sector_size):
        for k,v in carveBtreeNode(journal[i:i+nodeSize],HFSPlusCatalogKey, HFSPlusCatalogData):
            if v.recordType == kHFSPlusFileRecord:
                name = getString(k)
                # Deduplicate on a hash of the raw catalog key bytes.
                h = hashlib.sha1(HFSPlusCatalogKey.build(k)).digest()
                if f.has_key(h):
                    continue
                # Keep only records absent from the live catalog ...
                if volume.catalogTree.searchByCNID(v.data.fileID) == (None, None):
                    # ... whose first data block is not marked in use.
                    if volume.isBlockInUse(v.data.dataFork.HFSPlusExtentDescriptor[0].startBlock) == False:
                        print "deleted file", v.data.fileID, name
                        fileid = v.data.fileID  # NOTE: unused local
                        f[h]=(name, v)
    return f.values()
|
||||
|
||||
|
||||
magics=["SQLite", "bplist", "<?xml", "\xFF\xD8\xFF", "\xCE\xFA\xED\xFE", "\x89PNG", "\x00\x00\x00\x1CftypM4A",
        "\x00\x00\x00\x14ftypqt"]
"""
HAX: should do something better like compute entropy or something
"""
def isDecryptedCorrectly(data):
    """Heuristic: data looks decrypted if it begins with a known file magic."""
    # str.startswith accepts a tuple of prefixes - one call replaces the loop.
    return data.startswith(tuple(magics))
|
||||
|
||||
"""
|
||||
carve the journal for deleted cprotect xattrs and file records
|
||||
"""
|
||||
def carveEMFVolumeJournal(volume):
|
||||
journal = volume.readJournal()
|
||||
print "Journal size : %s" % sizeof_fmt(len(journal))
|
||||
hdr = journal_header.parse(journal)
|
||||
sector_size = hdr.jhdr_size
|
||||
nodeSize = volume.catalogTree.nodeSize
|
||||
print "Collecting existing file ids"
|
||||
fileIds = volume.listAllFileIds()
|
||||
print "%d file IDs" % len(fileIds.keys())
|
||||
files = {}
|
||||
keys = {}
|
||||
|
||||
for i in xrange(0,len(journal),sector_size):
|
||||
for k,v in carveBtreeNode(journal[i:i+nodeSize],HFSPlusCatalogKey, HFSPlusCatalogData):
|
||||
if v.recordType == kHFSPlusFileRecord:
|
||||
name = getString(k)
|
||||
h = hashlib.sha1(HFSPlusCatalogKey.build(k)).digest()
|
||||
if files.has_key(h):
|
||||
continue
|
||||
if not fileIds.has_key(v.data.fileID):
|
||||
#we only keep files where the first block is not marked as in use
|
||||
if volume.isBlockInUse(v.data.dataFork.HFSPlusExtentDescriptor[0].startBlock) == False:
|
||||
print "Found deleted file record", v.data.fileID, name
|
||||
files[h] = (name,v)
|
||||
for k,v in carveBtreeNode(journal[i:i+nodeSize],HFSPlusAttrKey, HFSPlusAttrData):
|
||||
if getString(k) == "com.apple.system.cprotect":
|
||||
if not fileIds.has_key(k.fileID):
|
||||
filekeys = keys.setdefault(k.fileID, [])
|
||||
try:
|
||||
cprotect = cprotect_xattr.parse(v.data)
|
||||
except:
|
||||
continue
|
||||
#assert cprotect.xattr_major_version == 2
|
||||
filekey = volume.keybag.unwrapKeyForClass(cprotect.persistent_class, cprotect.persistent_key)
|
||||
if filekey and not filekey in filekeys:
|
||||
print "Found key for file", k.fileID
|
||||
filekeys.append(filekey)
|
||||
|
||||
return files.values(), keys
|
||||
|
||||
"""
|
||||
"bruteforce" method, tries to decrypt all unallocated blocks with provided file keys
|
||||
this is a hack, don't expect interesting results with this
|
||||
"""
|
||||
def carveEMFemptySpace(volume, file_keys, outdir):
|
||||
for lba, block in volume.unallocatedBlocks():
|
||||
iv = volume.ivForLBA(lba)
|
||||
for filekey in file_keys:
|
||||
ciphertext = AESencryptCBC(block, volume.emfkey, iv)
|
||||
clear = AESdecryptCBC(ciphertext, filekey, iv)
|
||||
if isDecryptedCorrectly(clear):
|
||||
print "Decrypted stuff at lba %x" % lba
|
||||
open(outdir+ "/%x.bin" % lba, "wb").write(clear)
|
||||
|
||||
|
||||
def do_emf_carving(volume, carveokdir, carvenokdir):
|
||||
deletedFiles, filekeys = carveEMFVolumeJournal(volume)
|
||||
|
||||
print "Journal carving done, trying to extract deleted files"
|
||||
n = 0
|
||||
for name, vv in deletedFiles:
|
||||
for filekey in filekeys.get(vv.data.fileID, []):
|
||||
ff = EMFFile(volume,vv.data.dataFork, vv.data.fileID, filekey, deleted=True)
|
||||
data = ff.readAllBuffer()
|
||||
if isDecryptedCorrectly(data):
|
||||
write_file(carveokdir + "%d_%s" % (vv.data.fileID, name.replace("/","_")),data)
|
||||
n += 1
|
||||
else:
|
||||
write_file(carvenokdir + "%d_%s" % (vv.data.fileID, name.replace("/","_")),data)
|
||||
if not filekeys.has_key(vv.data.fileID):
|
||||
print "Missing file key for", name
|
||||
else:
|
||||
del filekeys[vv.data.fileID]
|
||||
|
||||
print "Done, extracted %d files" % n
|
||||
|
||||
if False:
|
||||
fks = set(reduce(lambda x,y: x+y, filekeys.values()))
|
||||
print "%d file keys left, try carving empty space (slow) ? CTRL-C to exit" % len(fks)
|
||||
raw_input()
|
||||
carveEMFemptySpace(volume, fks)
|
||||
@@ -0,0 +1,341 @@
|
||||
from construct import *
|
||||
from construct.macros import UBInt64
|
||||
"""
|
||||
http://developer.apple.com/library/mac/#technotes/tn/tn1150.html
|
||||
"""
|
||||
|
||||
def getString(obj):
    """Return the unicode name held in a record's HFSUniStr255 field."""
    uni_field = obj.HFSUniStr255
    return uni_field.unicode
|
||||
|
||||
# BSD fileMode bit pattern marking a symlink (see HFSPlusBSDInfo.fileMode).
S_IFLNK = 0120000
# Finder fileType/fileCreator four-char codes, stored as big-endian ints.
kSymLinkFileType = 0x736C6E6B   # 'slnk'
kSymLinkCreator = 0x72686170    # 'rhap'
kHardLinkFileType = 0x686C6E6B  # 'hlnk'
kHFSPlusCreator = 0x6866732B    # 'hfs+'

# BTHeaderRec.keyCompareType values (HFSX): case-insensitive vs binary keys.
kHFSCaseFolding = 0xCF
kHFSBinaryCompare = 0xBC
|
||||
|
||||
|
||||
def is_symlink(rec):
    """True when a catalog file record's Finder info marks it as a symlink."""
    finder = rec.FileInfo
    return (finder.fileType == kSymLinkFileType
            and finder.fileCreator == kSymLinkCreator)
|
||||
|
||||
# Reserved catalog node IDs (CNIDs).
kHFSRootParentID = 1
kHFSRootFolderID = 2
kHFSExtentsFileID = 3
kHFSCatalogFileID = 4
kHFSBadBlockFileID = 5
kHFSAllocationFileID = 6
kHFSStartupFileID = 7
kHFSAttributesFileID = 8
kHFSRepairCatalogFileID = 14
kHFSBogusExtentFileID = 15
kHFSFirstUserCatalogNodeID = 16

# BTNodeDescriptor.kind values.
kBTLeafNode = -1
kBTIndexNode = 0
kBTHeaderNode = 1
kBTMapNode = 2

# Catalog record types (HFSPlusCatalogData.recordType).
kHFSPlusFolderRecord = 0x0001
kHFSPlusFileRecord = 0x0002
kHFSPlusFolderThreadRecord = 0x0003
kHFSPlusFileThreadRecord = 0x0004

# Extended-attribute record forms (HFSPlusAttrData.recordType).
kHFSPlusAttrInlineData = 0x10
kHFSPlusAttrForkData = 0x20
kHFSPlusAttrExtents = 0x30

# Fork selector (HFSPlusExtentKey.forkType).
kForkTypeData = 0
kForkTypeRsrc = 0xFF

# Bit numbers of HFSPlusVolumeHeader.attributes.
kHFSVolumeHardwareLockBit = 7
kHFSVolumeUnmountedBit = 8
kHFSVolumeSparedBlocksBit = 9
kHFSVolumeNoCacheRequiredBit = 10
kHFSBootVolumeInconsistentBit = 11
kHFSCatalogNodeIDsReusedBit = 12
kHFSVolumeJournaledBit = 13
kHFSVolumeSoftwareLockBit = 15

# Magic of the com.apple.decmpfs xattr header.
DECMPFS_MAGIC = 0x636d7066 #cmpf
|
||||
|
||||
# One contiguous run of allocation blocks belonging to a fork.
HFSPlusExtentDescriptor = Struct("HFSPlusExtentDescriptor",
    UBInt32("startBlock"),
    UBInt32("blockCount")
)
# A fork's first 8 extents; further extents live in the extents overflow b-tree.
HFSPlusExtentRecord = Array(8,HFSPlusExtentDescriptor)

# Size and initial extents of one file fork.
HFSPlusForkData = Struct("HFSPlusForkData",
    UBInt64("logicalSize"),
    UBInt32("clumpSize"),
    UBInt32("totalBlocks"),
    # Unnamed Array: parsed as a list of 8 HFSPlusExtentDescriptor
    # (accessed elsewhere as .HFSPlusExtentDescriptor[i]).
    Array(8, HFSPlusExtentDescriptor)
)
|
||||
|
||||
# HFS+ volume header; all multi-byte fields are big-endian (per TN1150,
# it is stored 1024 bytes from the start of the volume).
HFSPlusVolumeHeader= Struct("HFSPlusVolumeHeader",
    UBInt16("signature"),         # 'H+' (0x482B) or 'HX' (0x4858, HFSX)
    UBInt16("version"),
    UBInt32("attributes"),        # bit numbers kHFSVolume*Bit above
    UBInt32("lastMountedVersion"),
    UBInt32("journalInfoBlock"),  # allocation block holding JournalInfoBlock
    UBInt32("createDate"),
    UBInt32("modifyDate"),
    UBInt32("backupDate"),
    UBInt32("checkedDate"),
    UBInt32("fileCount"),
    UBInt32("folderCount"),
    UBInt32("blockSize"),         # allocation block size in bytes
    UBInt32("totalBlocks"),
    UBInt32("freeBlocks"),
    UBInt32("nextAllocation"),
    UBInt32("rsrcClumpSize"),
    UBInt32("dataClumpSize"),
    UBInt32("nextCatalogID"),     # next unused CNID
    UBInt32("writeCount"),
    UBInt64("encodingsBitmap"),

    Array(8, UBInt32("finderInfo")),

    # Fork data of the five special files; Embed flattens HFSPlusForkData's
    # fields into each named sub-container.
    Struct("allocationFile", Embed(HFSPlusForkData)),
    Struct("extentsFile", Embed(HFSPlusForkData)),
    Struct("catalogFile", Embed(HFSPlusForkData)),
    Struct("attributesFile", Embed(HFSPlusForkData)),
    Struct("startupFile", Embed(HFSPlusForkData)),
)
|
||||
|
||||
# Descriptor found at the start of every b-tree node.
BTNodeDescriptor = Struct("BTNodeDescriptor",
    UBInt32("fLink"),       # forward link to next node of this kind
    UBInt32("bLink"),       # backward link
    SBInt8("kind"),         # kBTLeafNode/kBTIndexNode/kBTHeaderNode/kBTMapNode
    UBInt8("height"),
    UBInt16("numRecords"),
    UBInt16("reserved")
)

# B-tree header record, stored in node 0 right after the node descriptor.
BTHeaderRec = Struct("BTHeaderRec",
    UBInt16("treeDepth"),
    UBInt32("rootNode"),
    UBInt32("leafRecords"),
    UBInt32("firstLeafNode"),
    UBInt32("lastLeafNode"),
    UBInt16("nodeSize"),       # node size in bytes; may differ from fs block size
    UBInt16("maxKeyLength"),
    UBInt32("totalNodes"),
    UBInt32("freeNodes"),
    UBInt16("reserved1"),
    UBInt32("clumpSize"),
    UBInt8("btreeType"),
    UBInt8("keyCompareType"),  # kHFSCaseFolding or kHFSBinaryCompare (HFSX)
    UBInt32("attributes"),
    Array(16, UBInt32("reserved3"))
)
|
||||
|
||||
# Length-prefixed UTF-16-BE string; length is in UTF-16 code units,
# hence the * 2 to get a byte count.
HFSUniStr255 = Struct("HFSUniStr255",
    UBInt16("length"),
    String("unicode", lambda ctx: ctx["length"] * 2, encoding="utf-16-be")
)

# Key of the extended attributes b-tree.
HFSPlusAttrKey = Struct("HFSPlusAttrKey",
    UBInt16("keyLength"),
    UBInt16("pad"),
    UBInt32("fileID"),
    UBInt32("startBlock"),
    HFSUniStr255,           # attribute name, e.g. "com.apple.system.cprotect"
    #UBInt32("nodeNumber")
)

# Inline attribute data record: type tag, then a size-prefixed raw payload.
HFSPlusAttrData = Struct("HFSPlusAttrData",
    UBInt32("recordType"),  # kHFSPlusAttrInlineData / ForkData / Extents
    Array(2, UBInt32("reserved")),
    UBInt32("size"),
    MetaField("data", lambda ctx: ctx["size"])
)
|
||||
|
||||
# Catalog b-tree key: parent folder CNID plus node name.
HFSPlusCatalogKey = Struct("HFSPlusCatalogKey",
    UBInt16("keyLength"),
    UBInt32("parentID"),
    HFSUniStr255
)

# POSIX-style ownership/permissions block embedded in catalog records.
HFSPlusBSDInfo = Struct("HFSPlusBSDInfo",
    UBInt32("ownerID"),
    UBInt32("groupID"),
    UBInt8("adminFlags"),
    UBInt8("ownerFlags"),
    UBInt16("fileMode"),      # file type + mode bits (e.g. S_IFLNK)
    UBInt32("union_special")  # per TN1150 a union: iNodeNum/linkCount/rawDevice
)
|
||||
|
||||
# Classic Mac OS Finder geometry types.
Point = Struct("Point",
    SBInt16("v"),
    SBInt16("h")
)
Rect = Struct("Rect",
    SBInt16("top"),
    SBInt16("left"),
    SBInt16("bottom"),
    SBInt16("right")
)
# Finder metadata for files; fileType/fileCreator are four-char codes
# (see kSymLinkFileType etc. above).
FileInfo = Struct("FileInfo",
    UBInt32("fileType"),
    UBInt32("fileCreator"),
    UBInt16("finderFlags"),
    Point,
    UBInt16("reservedField")
)
ExtendedFileInfo = Struct("ExtendedFileInfo",
    Array(4, SBInt16("reserved1")),
    UBInt16("extendedFinderFlags"),
    SBInt16("reserved2"),
    SBInt32("putAwayFolderID")
)

# Finder metadata for folders.
FolderInfo = Struct("FolderInfo",
    Rect,
    UBInt16("finderFlags"),
    Point,
    UBInt16("reservedField")
)

ExtendedFolderInfo = Struct("ExtendedFolderInfo",
    Point,
    SBInt32("reserved1"),
    UBInt16("extendedFinderFlags"),
    SBInt16("reserved2"),
    SBInt32("putAwayFolderID")
)
|
||||
|
||||
# Catalog payload for a folder (recordType == kHFSPlusFolderRecord).
HFSPlusCatalogFolder = Struct("HFSPlusCatalogFolder",
    UBInt16("flags"),
    UBInt32("valence"),    # number of direct children
    UBInt32("folderID"),   # this folder's CNID
    UBInt32("createDate"),
    UBInt32("contentModDate"),
    UBInt32("attributeModDate"),
    UBInt32("accessDate"),
    UBInt32("backupDate"),
    HFSPlusBSDInfo,
    FolderInfo,
    ExtendedFolderInfo,
    UBInt32("textEncoding"),
    UBInt32("reserved")
)

# Catalog payload for a file (recordType == kHFSPlusFileRecord).
HFSPlusCatalogFile = Struct("HFSPlusCatalogFile",
    UBInt16("flags"),
    UBInt32("reserved1"),
    UBInt32("fileID"),     # this file's CNID
    UBInt32("createDate"),
    UBInt32("contentModDate"),
    UBInt32("attributeModDate"),
    UBInt32("accessDate"),
    UBInt32("backupDate"),
    HFSPlusBSDInfo,
    FileInfo,
    ExtendedFileInfo,
    UBInt32("textEncoding"),
    UBInt32("reserved2"),
    # Embed flattens the fork fields into the named sub-containers,
    # e.g. record.dataFork.HFSPlusExtentDescriptor[0].startBlock.
    Struct("dataFork", Embed(HFSPlusForkData)),
    Struct("resourceFork", Embed(HFSPlusForkData))
)
|
||||
|
||||
# Thread record: maps a CNID back to its parent folder and name.
HFSPlusCatalogThread = Struct("HFSPlusCatalogThread",
    SBInt16("reserved"),
    UBInt32("parentID"),
    HFSUniStr255,
)

# Generic catalog leaf record: a 16-bit type tag followed by the
# payload structure selected by that tag.
HFSPlusCatalogData = Struct("HFSPlusCatalogData",
    UBInt16("recordType"),
    Switch("data", lambda ctx: ctx["recordType"],
        {
            kHFSPlusFolderRecord : HFSPlusCatalogFolder,
            kHFSPlusFileRecord : HFSPlusCatalogFile,
            kHFSPlusFolderThreadRecord: HFSPlusCatalogThread,
            kHFSPlusFileThreadRecord: HFSPlusCatalogThread
        },
        default=HFSPlusCatalogFolder #XXX: should not reach
    )
)
|
||||
|
||||
# Key of the extents overflow b-tree.
HFSPlusExtentKey = Struct("HFSPlusExtentKey",
    UBInt16("keyLength"),
    UBInt8("forkType"),    # kForkTypeData or kForkTypeRsrc
    UBInt8("pad"),
    UBInt32("fileID"),
    UBInt32("startBlock")  # first file block covered by this record
)
|
||||
|
||||
# Header of the com.apple.decmpfs xattr (little-endian); compression_magic
# should equal DECMPFS_MAGIC ('cmpf').
# Fixed: the construct name string had a stray trailing space
# ("HFSPlusDecmpfs ") which leaked into the parsed container's name.
HFSPlusDecmpfs = Struct("HFSPlusDecmpfs",
    ULInt32("compression_magic"),
    ULInt32("compression_type"),
    ULInt64("uncompressed_size"),
)
|
||||
|
||||
# Header of a compressed resource fork (big-endian).
HFSPlusCmpfRsrcHead = Struct("HFSPlusCmpfRsrcHead",
    UBInt32("headerSize"),
    UBInt32("totalSize"),
    UBInt32("dataSize"),
    UBInt32("flags")
)

# One entry of the compressed-chunk table (little-endian).
HFSPlusCmpfRsrcBlock = Struct("HFSPlusCmpfRsrcBlock",
    ULInt32("offset"),
    ULInt32("size")
)

# Chunk-table header. NOTE(review): dataSize is parsed big-endian while
# numBlocks is little-endian -- looks intentional for this format, but
# verify against a real compressed fork.
HFSPlusCmpfRsrcBlockHead = Struct("HFSPlusCmpfRsrcBlockHead",
    UBInt32("dataSize"),
    ULInt32("numBlocks"),
    Array(lambda ctx:ctx["numBlocks"], HFSPlusCmpfRsrcBlock)
)

# Trailer at the end of the compressed resource fork; most fields are
# unidentified (unk1..unk4).
HFSPlusCmpfEnd = Struct("HFSPlusCmpfEnd",
    Array(6, UBInt32("pad")),
    UBInt16("unk1"),
    UBInt16("unk2"),
    UBInt16("unk3"),
    UBInt32("magic"),
    UBInt32("flags"),
    UBInt64("size"),
    UBInt32("unk4")
)
|
||||
|
||||
|
||||
"""
|
||||
Journal stuff
|
||||
"""
|
||||
JournalInfoBlock = Struct("JournalInfoBlock",
|
||||
UBInt32("flags"),
|
||||
Array(8, UBInt32("device_signature")),
|
||||
UBInt64("offset"),
|
||||
UBInt64("size"),
|
||||
Array(32, UBInt32("reserved"))
|
||||
)
|
||||
|
||||
journal_header = Struct("journal_header",
|
||||
ULInt32("magic"),
|
||||
ULInt32("endian"),
|
||||
ULInt64("start"),
|
||||
ULInt64("end"),
|
||||
ULInt64("size"),
|
||||
ULInt32("blhdr_size"),
|
||||
ULInt32("checksum"),
|
||||
ULInt32("jhdr_size")
|
||||
)
|
||||
|
||||
block_info = Struct("block_info",
|
||||
ULInt64("bnum"),
|
||||
ULInt32("bsize"),
|
||||
ULInt32("next")
|
||||
)
|
||||
|
||||
block_list_header = Struct("block_list_header",
|
||||
ULInt16("max_blocks"),
|
||||
ULInt16("num_blocks"),
|
||||
ULInt32("bytes_used"),
|
||||
SLInt32("checksum"),
|
||||
UBInt32("pad"),
|
||||
Array(lambda ctx:ctx["num_blocks"], block_info)
|
||||
)
|
||||
Reference in New Issue
Block a user