initial code for dumping imessages in a reasonable format
This commit is contained in:
@@ -0,0 +1,381 @@
|
||||
from crypto.aes import AESdecryptCBC, AESencryptCBC
|
||||
from hfs.emf import cprotect_xattr, EMFVolume
|
||||
from hfs.hfs import HFSVolume, hfs_date, HFSFile
|
||||
from hfs.journal import carveBtreeNode, isDecryptedCorrectly
|
||||
from hfs.structs import *
|
||||
from util import sizeof_fmt, makedirs, hexdump
|
||||
import hashlib
|
||||
import os
|
||||
import struct
|
||||
|
||||
class NANDCarver(object):
|
||||
    def __init__(self, volume, image, outputdir=None):
        """Carve deleted files from a NAND image by replaying stale FTL pages.

        volume: HFS/EMF volume object; may expose ``emfkey`` / ``cp_root``
            for per-file encryption support.
        image: NAND image wrapper (pageSize, encrypted, filename, ftl, ...).
        outputdir: destination for recovered files; defaults to an
            "undelete" folder next to the image file (or ``./undelete``
            when reading from a remote device).
        """
        self.volume = volume
        self.image = image
        self.nand = image
        self.ftlhax = False          # lazy lbn -> page-version lookup (filled by carve*)
        self.userblocks = None
        self.lpnToVpn = None
        self.files = {}              # sha1(catalog key) -> (name, record, usn)
        self.keys = {}               # fileID -> [candidate file keys]
        self.encrypted = image.encrypted and hasattr(volume, "emfkey")
        # NOTE(review): this second assignment overwrites the one above, so
        # encryption status ends up depending only on cp_root — confirm intended.
        self.encrypted = hasattr(volume, "cp_root") and volume.cp_root != None
        if outputdir == None:
            if image.filename != "remote": outputdir = os.path.join(os.path.dirname(image.filename), "undelete")
            else: outputdir = os.path.join(".", "undelete")
        print "Carver output %s" % outputdir
        self.outputdir = outputdir
        self.okfiles = 0             # count of files recovered with no missing pages
        self.first_lba = self.volume.bdev.lbaoffset   # partition start, in LBAs
        self.pageSize = image.pageSize
        # recognizable filler for pages that could not be recovered
        self.blankPage = "\xDE\xAD\xBE\xEF" * (self.pageSize/4)
        self.emfkey = None
        self.fileIds = None          # ids of files still present in the catalog
        self.fastMode = False
        if hasattr(volume, "emfkey"):
            self.emfkey = volume.emfkey
|
||||
|
||||
    def carveFile(self, hfsfile, callback, lbas=None, filter_=None):
        """Replay every stale NAND page version of *hfsfile* through *callback*.

        hfsfile: HFS special file (catalog/attributes) whose extents to scan.
        callback: called as callback(pagedata, usn, filter_) per page version.
        lbas: optional whitelist of logical block numbers to consider.
        filter_: passed through to the callback untouched.
        """
        for e in hfsfile.extents:
            if e.blockCount == 0:
                # a zero-length extent terminates the extent list
                break
            for i in xrange(e.startBlock, e.startBlock+e.blockCount):
                if lbas and not i in lbas:
                    continue
                if self.fastMode:
                    # fast mode: ftlhax maps lba -> flat list of virtual pages
                    for vpn in self.ftlhax.get(self.first_lba+i, []):
                        usn = 0
                        s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+i)
                        callback(d, usn, filter_)
                else:
                    # slow mode: ftlhax maps lba -> {usn: [vpn, ...]}; skip the
                    # newest usn ([:-1]) since it is the live (non-deleted) copy
                    usnsForLbn = self.ftlhax.get(self.first_lba+i, [])
                    for usn in sorted(usnsForLbn.keys())[:-1]:
                        for vpn in usnsForLbn[usn]:
                            s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+i)
                            callback(d, usn, filter_)
|
||||
|
||||
    def _catalogFileCallback(self, data, usn, filter_):
        """Collect deleted-file catalog records carved out of one btree page.

        Records whose fileID is still present in the live catalog (fileIds)
        are skipped; survivors are stored in self.files keyed by the sha1 of
        the rebuilt catalog key so duplicates across page versions collapse.
        """
        for k,v in carveBtreeNode(data,HFSPlusCatalogKey, HFSPlusCatalogData):
            if v.recordType != kHFSPlusFileRecord:
                continue
            if filter_ and not filter_(k,v):
                continue
            name = getString(k)
            h = hashlib.sha1(HFSPlusCatalogKey.build(k)).digest()
            if self.files.has_key(h):
                continue  # already seen this exact record in another page version
            if not self.fileIds.has_key(v.data.fileID):
                print "Found deleted file record", v.data.fileID, name.encode("utf-8"), "created", hfs_date(v.data.createDate)
                self.files[h] = (name,v, usn)
|
||||
|
||||
    def _attributesFileCallback(self, data, usn, filter_):
        """Collect per-file encryption keys carved from one attributes btree page.

        Only com.apple.system.cprotect attributes of files absent from the
        live catalog are considered; each successfully unwrapped key is
        appended (deduplicated) to self.keys[fileID].
        """
        for k,v in carveBtreeNode(data,HFSPlusAttrKey, HFSPlusAttrData):
            if getString(k) != "com.apple.system.cprotect":
                continue
            if self.fileIds.has_key(k.fileID):
                continue  # file still exists; not a deletion candidate
            filekeys = self.keys.setdefault(k.fileID, [])
            try:
                cprotect = cprotect_xattr.parse(v.data)
            except:
                # best-effort carving: corrupt/partial attribute data is skipped
                continue
            if cprotect.key_size == 0:
                continue
            filekey = self.volume.keybag.unwrapKeyForClass(cprotect.persistent_class, cprotect.persistent_key, False)
            if filekey and not filekey in filekeys:
                filekeys.append(filekey)
|
||||
|
||||
    def carveCatalog(self, lbas=None, filter_=None):
        """Carve old versions of the catalog file for deleted file records."""
        return self.carveFile(self.volume.catalogFile, self._catalogFileCallback, lbas, filter_)
|
||||
|
||||
    def carveKeys(self, lbas=None):
        """Carve old versions of the attributes file for per-file keys."""
        return self.carveFile(self.volume.xattrFile, self._attributesFileCallback, lbas)
|
||||
|
||||
def pagesForLBN(self, lbn):
|
||||
return self.ftlhax.get(self.first_lba + lbn, {})
|
||||
|
||||
    def decryptFileBlock(self, pn, filekey, lbn, decrypt_offset):
        """Read virtual page *pn* and decrypt it with *filekey*.

        pn: virtual page number to read (read without the EMF key).
        lbn: logical block number, used to derive the pre-iOS5 IV.
        decrypt_offset: byte offset of this page within the file; iOS 5
            derives one IV per 0x1000-byte AES chunk from this offset.
        Returns the cleartext page (or raw data when the volume is not
        encrypted).
        """
        s, ciphertext = self.nand.ftl.YAFTL_readPage(pn, None, lbn)
        if not self.encrypted:
            return ciphertext
        if not self.image.isIOS5():
            # iOS <5: single CBC pass, IV derived from the LBA
            return AESdecryptCBC(ciphertext, filekey, self.volume.ivForLBA(lbn))
        clear = ""
        # iOS 5 scheme: per-chunk IV = AES_enc(ivForLBA(offset), sha1(filekey)[:16])
        ivkey = hashlib.sha1(filekey).digest()[:16]
        for i in xrange(len(ciphertext)/0x1000):
            iv = self.volume.ivForLBA(decrypt_offset, False)
            iv = AESencryptCBC(iv, ivkey)
            clear += AESdecryptCBC(ciphertext[i*0x1000:(i+1)*0x1000], filekey, iv)
            decrypt_offset += 0x1000
        return clear
|
||||
|
||||
    def writeUndeletedFile(self, filename, data):
        """Write recovered *data* under the output dir, sorted by extension.

        The filename is sanitized for Windows (invalid chars \/:*?"<>| are
        deleted via str.translate); files with a known extension land in a
        per-extension subfolder.
        """
        knownExtensions = (".m4a", ".plist",".sqlite",".sqlitedb", ".jpeg", ".jpg", ".png", ".db",".json",".xml",".sql")
        #windows invalid chars \/:*?"<>|
        filename = str(filename.encode("utf-8")).translate(None, "\\/:*?\"<>|,")
        folder = self.outputdir
        if self.outputdir == "./":
            folder = folder + "/undelete"
        elif filename.lower().endswith(knownExtensions):
            ext = filename[filename.rfind(".")+1:]
            folder = folder + "/" + ext.lower()
        makedirs(folder)
        open(folder + "/" + filename, "wb").write(data)
|
||||
|
||||
    def getFileAtUSN(self, filename, filerecord, filekey, usn, previousVersion=None, exactSize=True):
        """Reassemble one version (at *usn*) of a deleted file and write it out.

        previousVersion: page list from a newer recovery attempt, used to
            fill blocks that have no page at this usn (rollback chaining).
        exactSize: truncate the output to the fork's logicalSize.
        Returns the list of recovered pages (reusable as previousVersion).
        """
        missing_pages = 0
        decrypt_offset = 0
        file_pages = []
        logicalSize = filerecord.dataFork.logicalSize
        for extent in self.volume.getAllExtents(filerecord.dataFork, filerecord.fileID):
            for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
                pn = self.pagesForLBN(bn).get(usn) #fail
                if pn:
                    # take the last page written at this usn for the block
                    clear = self.decryptFileBlock(pn[-1], filekey, bn, decrypt_offset)
                    file_pages.append(clear)
                elif previousVersion:
                    file_pages.append(previousVersion[len(file_pages)])
                else:
                    file_pages.append(self.blankPage)
                    missing_pages += 1
                decrypt_offset += self.pageSize

        print "Recovered %d:%s %d missing pages, size %s, created %s, contentModDate %s" % \
            (filerecord.fileID, filename.encode("utf-8"), missing_pages, sizeof_fmt(logicalSize), hfs_date(filerecord.createDate), hfs_date(filerecord.contentModDate))
        filename = "%d_%d_%s" % (filerecord.fileID, usn, filename)
        if missing_pages == 0:
            filename = "OK_" + filename
            self.okfiles += 1
        data = "".join(file_pages)
        if exactSize:
            data = data[:logicalSize]
        self.writeUndeletedFile(filename, data)
        return file_pages
|
||||
|
||||
#test for SQLite
|
||||
    def rollbackExistingFile(self, filename):
        """Experimental: dump every historical version of an existing file.

        Intended for SQLite files (checks the "SQL" magic and the file
        change counter at offset 24), walking page versions oldest-first and
        chaining each recovery through getFileAtUSN's previousVersion.
        """
        filerecord = self.volume.getFileRecordForPath(filename)
        filekey = self.volume.getFileKeyForFileId(filerecord.fileID)
        print "filekey", filekey.encode("hex")
        z = None
        for extent in filerecord.dataFork.HFSPlusExtentDescriptor:
            for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
                pages = self.pagesForLBN(bn)
                print pages
                for usn in sorted(pages.keys()):
                    d = self.decryptFileBlock(pages[usn][-1], filekey, bn, 0)
                    # NOTE(review): "or True" disables the SQLite magic check —
                    # looks like leftover debugging; confirm before relying on it
                    if d.startswith("SQL") or True:
                        filechangecounter = struct.unpack(">L", d[24:28])
                        print usn, "OK", filechangecounter
                        z = self.getFileAtUSN(os.path.basename(filename), filerecord, filekey, usn, z)
                    else:
                        print usn, "FAIL"
        return
|
||||
|
||||
def filterFileName(self, filename):
|
||||
return filename.lower().endswith(".jpg")
|
||||
|
||||
    def getExistingFileIDs(self):
        """Cache the set of file ids still present in the live catalog.

        Carved records whose id is in this set are not deleted files.
        """
        print "Collecting existing file ids"
        self.fileIds = self.volume.listAllFileIds()
        print "%d file IDs" % len(self.fileIds.keys())
|
||||
|
||||
    def carveDeletedFiles_fast(self, catalogLBAs=None, filter_=None):
        """Fast carve: YAFTL_lookup1 index + readFileHax per deleted file.

        catalogLBAs: optional LBA whitelist restricting the catalog scan.
        filter_: optional predicate(key, record) applied to catalog records.
        """
        self.fastMode = True
        if not self.ftlhax:
            # build the lba -> stale-page index and the usn -> block map once
            hax, userblocks = self.nand.ftl.YAFTL_lookup1()
            self.ftlhax = hax
            self.userblocks = userblocks

        self.files = {}
        if not self.fileIds:
            self.getExistingFileIDs()
        print "Carving catalog file"
        self.carveCatalog(catalogLBAs, filter_)

        # probe the attribute tree around each deleted fileID to narrow the
        # key carve (currently computed but unused below — full carveKeys runs)
        keysLbas = []
        for name, vv, usn in self.files.values():
            for i in xrange(vv.data.fileID, vv.data.fileID + 100):
                if self.volume.xattrTree.search((i, "com.apple.system.cprotect")):
                    keysLbas.extend(self.volume.xattrTree.getLBAsHax())
                    break

        if self.encrypted and len(self.keys) == 0:
            print "Carving attribute file for file keys"
            self.carveKeys()

        self.okfiles = 0
        total = 0
        print "%d files, %d keys" % (len(self.files), len(self.keys))
        for name, vv, usn in self.files.values():
            if not self.keys.has_key(vv.data.fileID):
                print "No file key for %s" % name.encode("utf-8")
            # fall back to the volume EMF key when no per-file key was carved
            keys = set(self.keys.get(vv.data.fileID, [self.emfkey]))
            print "%s" % (name.encode("utf-8"))
            if self.readFileHax(name, vv.data, keys):
                total += 1

        print "Carving done, recovered %d deleted files, %d are most likely OK" % (total, self.okfiles)
|
||||
|
||||
    def carveDeleteFiles_slow(self, catalogLBAs=None, filter_=None):
        """Slow carve: YAFTL_hax2 index, then per-file key+usn search.

        For each deleted record, every candidate key is tried against each
        stored version of the file's first block until one decrypts
        correctly; that (key, usn) pair is used to rebuild the file.
        """
        self.fastMode = False
        self.files = {}
        if not self.ftlhax:
            self.ftlhax = self.nand.ftl.YAFTL_hax2()
        if not self.fileIds:
            self.getExistingFileIDs()
        if self.encrypted and len(self.keys) == 0:
            print "Carving attribute file for file keys"
            self.carveKeys()
        print "Carving catalog file"
        self.carveCatalog(catalogLBAs, filter_)

        self.okfiles = 0
        total = 0
        print "%d files" % len(self.files)
        for name, vv, usn in self.files.values():
            keys = set(self.keys.get(vv.data.fileID, [self.emfkey]))
            print "%s num keys = %d" % (name, len(keys))
            good_usn = 0
            for filekey in keys:
                if good_usn:
                    break
                first_block = vv.data.dataFork.HFSPlusExtentDescriptor[0].startBlock
                for usn, pn in self.pagesForLBN(first_block).items():
                    d = self.decryptFileBlock(pn[-1], filekey, self.first_lba+first_block, 0)
                    if isDecryptedCorrectly(d):
                        good_usn = usn
                        break
            if good_usn == 0:
                continue  # no key decrypted any version of the first block
            self.getFileAtUSN(name, vv.data, filekey, good_usn)
|
||||
|
||||
    def getBBTOC(self, block):
        """Return the lpn -> vpn mapping for one NAND superblock.

        Uses the block's on-flash BTOC pages when present, otherwise falls
        back to scanning the block's spare areas.
        """
        btoc = self.nand.ftl.readBTOCPages(block, self.nand.ftl.totalPages)
        if not btoc:
            return self.nand.ftl.block_lpn_to_vpn(block)
        bbtoc = {}
        for i in xrange(len(btoc)):
            # BTOC entry i holds the lpn stored at page i of this block
            bbtoc[btoc[i]] = block*self.nand.ftl.vfl.pages_per_sublk + i
        return bbtoc
|
||||
|
||||
    def readFileHax(self, filename, filerecord, filekeys):
        """Fast-mode recovery of one deleted file; returns True on success.

        1. Find a (key, page) pair that decrypts the file's first block.
        2. Starting from that page's usn, walk up to 5 candidate superblocks
           (plus the first block's own superblock) collecting pages whose
           lpn falls inside the file's extents.
        3. Reassemble, pad missing pages with blankPage, truncate to
           logicalSize, and write the result.
        """
        lba0 = self.first_lba + filerecord.dataFork.HFSPlusExtentDescriptor[0].startBlock
        filekey = None
        good_usn = None
        first_vpn = 0
        first_usn = 0
        hax = self.ftlhax
        print "%d versions for first lba" % len(hax.get(lba0, []))
        for k in filekeys:
            for vpn in hax.get(lba0, []):
                s, ciphertext = self.nand.ftl.YAFTL_readPage(vpn, key=None, lpn=None)
                if not ciphertext:
                    continue
                d = self.decryptFileBlock2(ciphertext, k, lba0, 0)
                if isDecryptedCorrectly(d):
                    filekey = k
                    first_vpn = vpn
                    first_usn = good_usn = s.usn
                    # superblock containing the matching first page
                    block = vpn / self.nand.ftl.vfl.pages_per_sublk
                    break
        if not filekey:
            return False
        logicalSize = filerecord.dataFork.logicalSize
        missing_pages = 0
        file_pages = []
        lbns = []
        for extent in self.volume.getAllExtents(filerecord.dataFork, filerecord.fileID):
            for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
                lbns.append(self.first_lba + bn)
        datas = {}
        # consider only the few superblocks written at/after the found usn
        usnblocksToLookAT = sorted(filter(lambda x: x >= good_usn, self.userblocks.keys()))[:5]
        print usnblocksToLookAT
        usnblocksToLookAT.insert(0, 0)  # sentinel: first iteration uses `block`
        first_block = True
        done = False
        for usn in usnblocksToLookAT:
            if first_block:
                bbtoc = self.getBBTOC(block)
                first_block = False
            else:
                bbtoc = self.getBBTOC(self.userblocks[usn])
            for lbn in bbtoc.keys():
                if not lbn in lbns:
                    continue
                idx = lbns.index(lbn)
                s, ciphertext = self.nand.ftl.YAFTL_readPage(bbtoc[lbn], key=None, lpn=None)
                if not ciphertext:
                    continue
                ciphertext = self.decryptFileBlock2(ciphertext, filekey, lbn, idx*self.pageSize)
                if idx == 0:
                    # re-validate the first page; other versions may be garbage
                    if not isDecryptedCorrectly(ciphertext):
                        continue
                datas[idx*self.pageSize] = (ciphertext, lbn - self.first_lba)
                if len(datas) == len(lbns):
                    done=True
                    break

            if done:
                break
        cleartext = ""
        decrypt_offset = 0
        for i in xrange(0,logicalSize, self.pageSize):
            if datas.has_key(i):
                ciphertext, lbn = datas[i]
                cleartext += ciphertext
            else:
                cleartext += self.blankPage
                missing_pages += 1
            decrypt_offset += self.pageSize

        print "Recovered %d:%s %d missing pages, size %s, created %s, contentModDate %s" % \
            (filerecord.fileID, filename.encode("utf-8"), missing_pages, sizeof_fmt(logicalSize), hfs_date(filerecord.createDate), hfs_date(filerecord.contentModDate))
        filename = "%d_%d_%s" % (filerecord.fileID, first_usn, filename)
        if missing_pages == 0:
            filename = "OK_" + filename
            self.okfiles += 1
        if True:#exactSize:
            cleartext = cleartext[:logicalSize]
        self.writeUndeletedFile(filename, cleartext)
        return True
|
||||
|
||||
    def decryptFileBlock2(self, ciphertext, filekey, lbn, decrypt_offset):
        """Decrypt an already-read page (variant of decryptFileBlock).

        Differs from decryptFileBlock in that the ciphertext is supplied by
        the caller and the pre-iOS5 IV is derived with add=False.
        """
        if not self.encrypted:
            return ciphertext
        if not self.image.isIOS5():
            return AESdecryptCBC(ciphertext, filekey, self.volume.ivForLBA(lbn, add=False))
        clear = ""
        # iOS 5: per-0x1000-chunk IV = AES_enc(ivForLBA(offset), sha1(filekey)[:16])
        ivkey = hashlib.sha1(filekey).digest()[:16]
        for i in xrange(len(ciphertext)/0x1000):
            iv = self.volume.ivForLBA(decrypt_offset, False)
            iv = AESencryptCBC(iv, ivkey)
            clear += AESdecryptCBC(ciphertext[i*0x1000:(i+1)*0x1000], filekey, iv)
            decrypt_offset += 0x1000
        return clear
|
||||
|
||||
    def getFileRanges(self, hfsfile):
        """Return one xrange of logical block numbers per non-empty extent."""
        res = []
        for e in hfsfile.extents:
            if e.blockCount == 0:
                break  # zero-length extent marks the end of the list
            res.append(xrange(e.startBlock, e.startBlock+e.blockCount))
        return res
|
||||
|
||||
    def readFSPage(self, vpn, lba):
        """Read virtual page *vpn* (EMF-decrypted for *lba*); None on failure."""
        s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+lba)
        if s:
            return d
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
|
||||
"""
|
||||
row-by-row dump
|
||||
page = data + spare metadata + iokit return code + iokit return code 2
|
||||
"""
|
||||
class NANDImageFlat(object):
    """Row-by-row NAND dump reader.

    Dumped page layout: data + spare metadata [+ two 4-byte IOKit return
    codes]. Pages of all CEs are interleaved row by row.
    """
    def __init__(self, filename, geometry):
        """Open *filename* and derive page layout from the geometry dict."""
        flags = os.O_RDONLY
        if sys.platform == "win32":
            flags |= os.O_BINARY  # avoid CRLF translation on Windows
        self.fd = os.open(filename, flags)
        self.nCEs = geometry["#ce"]
        self.pageSize = geometry["#page-bytes"]
        self.metaSize = geometry.get("meta-per-logical-page", 12)
        self.dumpedPageSize = geometry.get("dumpedPageSize", self.pageSize + self.metaSize + 8)
        self.hasIOKitStatus = True
        # dumps without the trailing 8 status bytes carry no IOKit codes
        if self.dumpedPageSize == self.pageSize + geometry["#spare-bytes"] or self.dumpedPageSize == self.pageSize + self.metaSize:
            self.hasIOKitStatus = False
        self.blankPage = "\xFF" * self.pageSize
        self.blankSpare = "\xFF" * self.metaSize
        self.imageSize = os.path.getsize(filename)
        expectedSize = geometry["#ce"] * geometry["#ce-blocks"] * geometry["#block-pages"] * self.dumpedPageSize
        if self.imageSize < expectedSize:
            raise Exception("Error: image appears to be truncated, expected size=%d" % expectedSize)
        print "Image size matches expected size, looks ok"

    def _readPage(self, ce, page):
        """Read the raw dumped record for (ce, page); rows interleave CEs."""
        i = page * self.nCEs + ce
        off = i * self.dumpedPageSize
        os.lseek(self.fd, off, os.SEEK_SET)
        return os.read(self.fd, self.dumpedPageSize)

    def readPage(self, ce, page):
        """Return (spare, data) for one page, or (None, None) if invalid.

        A page is invalid when the record is short, the IOKit status is
        non-zero, or (for status-less dumps) the page is all-0xFF blank.
        """
        d = self._readPage(ce, page)
        if not d or len(d) != self.dumpedPageSize:
            return None,None
        if self.hasIOKitStatus:
            r1,r2 = struct.unpack("<LL", d[self.pageSize+self.metaSize:self.pageSize+self.metaSize+8])
            if r1 != 0x0:
                return None, None
        data = d[:self.pageSize]
        spare = d[self.pageSize:self.pageSize+self.metaSize]
        if not self.hasIOKitStatus and data == self.blankPage and spare == self.blankSpare:
            return None,None
        return spare, data
|
||||
|
||||
"""
|
||||
iEmu NAND format
|
||||
one file for each CE, start with chip id (8 bytes) then pages
|
||||
page = non-empty flag (1 byte) + data + spare metadata (12 bytes)
|
||||
"""
|
||||
class NANDImageSplitCEs(object):
    """iEmu NAND format reader.

    One file per CE (ce_<n>.bin) starting with an 8-byte chip id, then
    pages; each page = 1-byte non-empty flag + data + 12 bytes of spare
    metadata.
    """
    def __init__(self, folder, geometry):
        """Open every ce_<i>.bin in *folder* per the geometry dict."""
        flags = os.O_RDONLY
        if sys.platform == "win32":
            flags |= os.O_BINARY  # avoid CRLF translation on Windows
        self.fds = []
        self.nCEs = geometry["#ce"]
        self.pageSize = geometry["#page-bytes"]
        self.metaSize = 12
        self.npages = 0
        self.dumpedPageSize = 1 + self.pageSize + self.metaSize
        for i in xrange(self.nCEs):
            fd = os.open(folder + "/ce_%d.bin" % i, flags)
            self.fds.append(fd)
            self.npages += os.fstat(fd).st_size / self.dumpedPageSize

    def _readPage(self, ce, page):
        """Read the raw dumped record for (ce, page) from that CE's file."""
        fd = self.fds[ce]
        off = 8 + page * self.dumpedPageSize #skip chip id
        os.lseek(fd, off, os.SEEK_SET)
        return os.read(fd, self.dumpedPageSize)

    def readPage(self, ce, page):
        """Return (spare, data) for one page, or (None, None) if invalid.

        The leading flag byte must mark the page non-empty ('1' or 0x01).
        """
        d = self._readPage(ce, page)
        if not d or len(d) != self.dumpedPageSize:
            return None,None
        if d[0] != '1' and d[0] != '\x01':
            return None,None
        data = d[1:1+self.pageSize]
        spare = d[1+self.pageSize:1+self.pageSize+self.metaSize]
        return spare, data
|
||||
|
||||
@@ -0,0 +1,220 @@
|
||||
from carver import NANDCarver
|
||||
from construct.core import Struct
|
||||
from construct.macros import ULInt32, ULInt16, Array, ULInt8, Padding
|
||||
from pprint import pprint
|
||||
from structs import SpareData
|
||||
from util import hexdump
|
||||
from vfl import VFL
|
||||
import plistlib
|
||||
|
||||
"""
|
||||
openiboot/plat-s5l8900/ftl.c
|
||||
openiboot/plat-s5l8900/includes/s5l8900/ftl.h
|
||||
"""
|
||||
# On-flash FTL context structures, per openiboot/plat-s5l8900/ftl.c and
# openiboot/plat-s5l8900/includes/s5l8900/ftl.h. All fields little-endian.

# Per-log-block state: which virtual block shadows which logical block.
FTLCxtLog = Struct("FTLCxtLog",
    ULInt32("usn"),
    ULInt16("wVbn"),
    ULInt16("wLbn"),
    ULInt32("wPageOffsets"),
    ULInt16("pagesUsed"),
    ULInt16("pagesCurrent"),
    ULInt32("isSequential")
)

FTLCxtElement2 = Struct("FTLCxtElement2",
    ULInt16("field_0"),
    ULInt16("field_2")
)

# Main FTL context record written to the FTL control block.
FTLCxt = Struct("FTLCxt",
    ULInt32("usnDec"),
    ULInt32("nextblockusn"),
    ULInt16("wNumOfFreeVb"),
    ULInt16("nextFreeIdx"),
    ULInt16("swapCounter"),
    Array(20, ULInt16("awFreeVb")),
    ULInt16("field_36"),
    Array(18, ULInt32("pages_for_pawMapTable")),
    Array(36, ULInt32("pages_for_pawEraseCounterTable")),
    Array(34, ULInt32("pages_for_wPageOffsets")),
    ULInt32("pawMapTable"),
    ULInt32("pawEraseCounterTable"),
    ULInt32("wPageOffsets"),
    Array(18, FTLCxtLog),
    ULInt32("eraseCounterPagesDirty"),
    ULInt16("unk3"),
    Array(3, ULInt16("FTLCtrlBlock")),
    ULInt32("FTLCtrlPage"),
    ULInt32("clean"),
    Array(36, ULInt32("pages_for_pawReadCounterTable")),
    ULInt32("pawReadCounterTable"),
    Array(5, FTLCxtElement2),
    ULInt32("field_3C8"),
    ULInt32("totalReadCount"),
    ULInt32("page_for_FTLCountsTable"),
    ULInt32("hasFTLCountsTable"),
    Padding(0x420), #, ULInt8("field_3D8")),
    ULInt32("versionLower"),
    ULInt32("versionUpper")
)

# Spare-area page type tags.
FTL_CTX_TYPE = 0x43
FTL_BLOCK_MAP = 0x44
FTL_ERASE_COUNTER = 0x46
FTL_MOUNTED = 0x47
FTL_CTX_TYPE_MAX = 0x4F
USER_TYPE = 0x40
USER_LAST_TYPE = 0x41 #last user page in superblock?
|
||||
|
||||
class FTL(object):
|
||||
    def __init__(self, nand, vfl):
        """Legacy (pre-YAFTL) flash translation layer on top of *vfl*.

        Tries to load the clean on-flash context first; falls back to a
        full scan-based restore on unclean shutdown.
        """
        self.nand = nand
        self.vfl = vfl
        self.pawMapTable = {} #maps logical blocks to virtual blocks
        self.pLogs = {}       # lbn -> {"wVbn": ..., "wPageOffsets": {...}} log blocks
        if not self.FTL_open():
            self.FTL_restore()
|
||||
|
||||
    def FTL_open(self):
        """Locate and parse the clean FTL context; False on unclean shutdown.

        The control block with the *lowest* usn among the FTLCtrlBlock
        candidates holds the current context; its last written page must be
        an FTL_CTX_TYPE page whose FTLCtrlPage points back at itself.
        """
        minUsnDec = 0xffffffff
        ftlCtrlBlock = 0xffff
        for vb in self.vfl.VFL_get_FTLCtrlBlock():
            s, d = self.vfl.read_single_page(vb * self.vfl.pages_per_sublk)
            if not s:
                continue
            if s.type >= FTL_CTX_TYPE and s.type <= FTL_CTX_TYPE_MAX:
                if s.usn < minUsnDec:
                    ftlCtrlBlock = vb
                    minUsnDec = s.usn

        print ftlCtrlBlock
        self.ftlCtrlBlock = ftlCtrlBlock
        # scan the control block backwards for the last context page written
        for p in xrange(self.vfl.pages_per_sublk-1,1, -1):
            s, d = self.vfl.read_single_page(ftlCtrlBlock * self.vfl.pages_per_sublk + p)
            if not s:
                continue
            if s.type == FTL_CTX_TYPE:
                print s.usn
                ctx = FTLCxt.parse(d)
                if ctx.versionLower == 0x46560001:
                    print ctx
                assert ctx.FTLCtrlPage == (ftlCtrlBlock * self.vfl.pages_per_sublk + p)
                break
            else:
                # last written page is not a context page => unclean shutdown
                print "Unclean shutdown, last type 0x%x" % s.type
                return False
        self.ctx = ctx
        print "FTL_open OK !"
        return True
|
||||
|
||||
    def determine_block_type(self, block):
        """Classify *block* as sequential or log-style.

        Returns (isSequential, maxUSN). A block is sequential when every
        readable page stores the lpn matching its in-block offset.
        NOTE(review): the scan returns on the *first* non-sequential page,
        so maxUSN may not cover the whole block in that case — confirm
        callers only need it for sequential blocks.
        """
        maxUSN = 0
        isSequential = True
        for page in xrange(self.vfl.pages_per_sublk-1,1, -1):
            s, _ = self.vfl.read_single_page(block * self.vfl.pages_per_sublk + page)
            if not s:
                continue
            if s.usn > maxUSN:
                maxUSN = s.usn
            if s.lpn % self.vfl.pages_per_sublk != page:
                isSequential = False
                return isSequential, maxUSN
        return isSequential, maxUSN
|
||||
|
||||
def FTL_restore(self):
|
||||
self.pLogs = self.vfl.nand.loadCachedData("pLogs")
|
||||
self.pawMapTable = self.vfl.nand.loadCachedData("pawMapTable")
|
||||
if self.pLogs and self.pawMapTable:
|
||||
print "Found cached FTL restore information"
|
||||
return
|
||||
self.pawMapTable = {}
|
||||
self.pLogs = {}
|
||||
ctx = None
|
||||
for p in xrange(self.vfl.pages_per_sublk-1,1, -1):
|
||||
s, d = self.vfl.read_single_page(self.ftlCtrlBlock * self.vfl.pages_per_sublk + p)
|
||||
if not s:
|
||||
continue
|
||||
if s.type == FTL_CTX_TYPE:
|
||||
print s.usn
|
||||
ctx = FTLCxt.parse(d)
|
||||
if ctx.versionLower == 0x46560001:
|
||||
print ctx
|
||||
assert ctx.FTLCtrlPage == (self.ftlCtrlBlock * self.vfl.pages_per_sublk + p)
|
||||
print "Found most recent ctx"
|
||||
break
|
||||
if not ctx:
|
||||
print "FTL_restore fail did not find ctx"
|
||||
raise
|
||||
blockMap = {}
|
||||
self.nonSequential = {}
|
||||
print "FTL_restore in progress ..."
|
||||
for sblock in xrange(self.vfl.userSuBlksTotal + 23):
|
||||
for page in xrange(self.vfl.pages_per_sublk):
|
||||
s, d = self.vfl.read_single_page(sblock * self.vfl.pages_per_sublk + page)
|
||||
if not s:
|
||||
continue
|
||||
if s.type >= FTL_CTX_TYPE and s.type <= FTL_CTX_TYPE_MAX:
|
||||
break
|
||||
if s.type != USER_TYPE and s.type != USER_LAST_TYPE:
|
||||
print "Weird page type %x at %x %x" % (s.type, sblock, page)
|
||||
continue
|
||||
if s.lpn % self.vfl.pages_per_sublk != page:
|
||||
print "Block %d non sequential" % sblock
|
||||
self.nonSequential[sblock] = 1
|
||||
blockMap[sblock] = (s.lpn / self.vfl.pages_per_sublk, s.usn)
|
||||
break
|
||||
|
||||
z = dict([(i, [(a, blockMap[a][1]) for a in blockMap.keys() if blockMap[a][0] ==i]) for i in xrange(self.vfl.userSuBlksTotal)])
|
||||
for k,v in z.items():
|
||||
if len(v) == 2:
|
||||
print k, v
|
||||
vbA, usnA = v[0]
|
||||
vbB, usnB = v[1]
|
||||
if usnA > usnB: #smallest USN is map block, highest log block
|
||||
self.pawMapTable[k] = vbB
|
||||
self.restoreLogBlock(k, vbA)
|
||||
else:
|
||||
self.pawMapTable[k] = vbA
|
||||
self.restoreLogBlock(k, vbB)
|
||||
elif len(v) > 2:
|
||||
raise Exception("fufu", k, v)
|
||||
else:
|
||||
self.pawMapTable[k] = v[0][0]
|
||||
self.vfl.nand.cacheData("pLogs", self.pLogs)
|
||||
self.vfl.nand.cacheData("pawMapTable", self.pawMapTable)
|
||||
|
||||
    def restoreLogBlock(self, lbn, vbn):
        """Rebuild the page-offset map of log block *vbn* shadowing *lbn*.

        Scans the virtual block until the first unreadable page, recording
        where each logical page offset was last written.
        """
        log = {"wVbn": vbn, "wPageOffsets": {}}
        for page in xrange(self.vfl.pages_per_sublk):
            s, d = self.vfl.read_single_page(vbn * self.vfl.pages_per_sublk + page)
            if not s:
                break  # log blocks are written sequentially; stop at first gap
            log["wPageOffsets"][s.lpn % self.vfl.pages_per_sublk] = page
        self.pLogs[lbn] = log
|
||||
|
||||
def mapPage(self, lbn, offset):
|
||||
if self.pLogs.has_key(lbn):
|
||||
if self.pLogs[lbn]["wPageOffsets"].has_key(offset):
|
||||
offset = self.pLogs[lbn]["wPageOffsets"][offset]
|
||||
#print "mapPage got log %d %d" % (lbn, offset)
|
||||
return self.pLogs[lbn]["wVbn"] * self.vfl.pages_per_sublk + offset
|
||||
if not self.pawMapTable.has_key(lbn):
|
||||
return 0xFFFFFFFF
|
||||
return self.pawMapTable[lbn] * self.vfl.pages_per_sublk + offset
|
||||
|
||||
    def readLPN(self, lpn, key=None):
        """Read the data of logical page *lpn* through the FTL mapping.

        Returns an all-0xFF page for unmapped blocks, None on a spare read
        failure, and raises if the mapping is inconsistent (spare lpn does
        not match the requested lpn).
        """
        lbn = lpn / self.vfl.pages_per_sublk
        offset = lpn % self.vfl.pages_per_sublk
        vpn = self.mapPage(lbn, offset)
        if vpn == 0xFFFFFFFF:
            print "lbn not found %d" % lbn
            return "\xFF" * self.nand.pageSize
        s,d = self.vfl.read_single_page(vpn, key, lpn)
        if not s:
            return None
        if s.lpn != lpn:
            raise Exception("FTL translation FAIL spare lpn=%d vs expected %d" % (s.lpn, lpn))
        return d
|
||||
|
||||
425
dump-imessages/iphone-dataprotection/python_scripts/nand/nand.py
Normal file
425
dump-imessages/iphone-dataprotection/python_scripts/nand/nand.py
Normal file
@@ -0,0 +1,425 @@
|
||||
from crypto.aes import AESdecryptCBC
|
||||
from firmware.img2 import IMG2
|
||||
from firmware.img3 import Img3, extract_img3s
|
||||
from firmware.scfg import parse_SCFG
|
||||
from hfs.emf import EMFVolume
|
||||
from hfs.hfs import HFSVolume
|
||||
from image import NANDImageSplitCEs, NANDImageFlat
|
||||
from keystore.effaceable import check_effaceable_header, EffaceableLockers
|
||||
from legacyftl import FTL
|
||||
from partition_tables import GPT_partitions, parse_lwvm, parse_mbr, parse_gpt, \
|
||||
APPLE_ENCRYPTED
|
||||
from progressbar import ProgressBar
|
||||
from remote import NANDRemote, IOFlashStorageKitClient
|
||||
from structs import *
|
||||
from util import sizeof_fmt, write_file, load_pickle, save_pickle, hexdump, \
|
||||
makedirs
|
||||
from util.bdev import FTLBlockDevice
|
||||
from vfl import VFL
|
||||
from vsvfl import VSVFL
|
||||
from yaftl import YAFTL
|
||||
import math
|
||||
import os
|
||||
import plistlib
|
||||
import struct
|
||||
|
||||
def ivForPage(page):
    """Derive the 16-byte AES IV for a NAND page number.

    Runs four steps of the 0x80000061-tapped LFSR over the page number,
    packing each intermediate state as a little-endian 32-bit word.
    """
    iv = b""
    for _ in range(4):
        if page & 1:
            page = 0x80000061 ^ (page >> 1)
        else:
            page = page >> 1
        iv += struct.pack("<L", page)
    return iv
|
||||
|
||||
#iOS 3
|
||||
#iOS 3
def getEMFkeyFromCRPT(data, key89B):
    """Extract the iOS 3 EMF volume key from a "tprc" (CRPT) blob.

    data: raw locker payload starting with the "tprc" magic.
    key89B: device key 0x89B used to decrypt the blob.
    Returns the 32-byte EMF key; asserts on bad magic or wrong key.
    """
    assert data.startswith("tprc")
    z = AESdecryptCBC(data[4:0x44], key89B)
    # decrypted payload starts with the reversed magic when the key is right
    assert z.startswith("TPRC"), "wrong key89B"
    #last_byte = struct.unpack("<Q", z[4:4+8])[0]
    emf = z[16:16+32]
    return emf
|
||||
|
||||
class NAND(object):
|
||||
H2FMI_HASH_TABLE = gen_h2fmi_hash_table()
|
||||
|
||||
    def __init__(self, filename, device_infos, ppn=False):
        """Open a NAND dump, detect its format/signature and build VFL+FTL.

        filename: dump path, "remote" for a live device, or a ce_*.bin
            split-CE folder member.
        device_infos: plist-backed dict with geometry, hw model and keys.
        ppn: when True, skip FTL construction (PPN devices).
        """
        self.device_infos = device_infos
        self.partition_table = None
        self.lockers = {}
        self.iosVersion = 0
        self.hasMBR = False
        self.metadata_whitening = False
        self.filename = filename
        # these early models used no data-partition encryption
        self.encrypted = device_infos["hwModel"] not in ["M68AP", "N45AP", "N82AP", "N72AP"]
        self.initGeometry(device_infos["nand"])

        # pick the image backend from the filename shape
        if os.path.basename(filename).startswith("ce_"):
            self.image = NANDImageSplitCEs(os.path.dirname(filename), device_infos["nand"])
        elif filename == "remote":
            self.image = NANDRemote(self.pageSize, self.metaSize, self.pagesPerBlock, self.bfn)
        else:
            self.image = NANDImageFlat(filename, device_infos["nand"])

        s, page0 = self.readPage(0,0)
        # "ndrG" magic in page 0 => device boots from NAND (no NOR)
        self.nandonly = (page0 != None) and page0.startswith("ndrG")
        if self.nandonly:
            self.encrypted = True

        magics = ["DEVICEINFOBBT"]
        nandsig = None
        if page0 and page0[8:14] == "Darwin":
            print "Found old style signature", page0[:8]
            nandsig = page0
        else:
            magics.append("NANDDRIVERSIGN")

        sp0 = self.readSpecialPages(0, magics)
        print "Found %s special pages in CE 0" % (", ".join(sp0.keys()))
        if not self.nandonly:
            print "Device does not boot from NAND (=> has a NOR)"

        vfltype = '1' #use VSVFL by default
        if not nandsig:
            nandsig = sp0.get("NANDDRIVERSIGN")
        if not nandsig:
            print "NANDDRIVERSIGN not found, assuming metadata withening = %d" % self.metadata_whitening
        else:
            nSig, flags = struct.unpack("<LL", nandsig[:8])
            vfltype = nandsig[1]
            self.metadata_whitening = (flags & 0x10000) != 0
            print "NAND signature 0x%x flags 0x%x withening=%d, epoch=%s" % (nSig, flags, self.metadata_whitening, nandsig[0])

        if not self.nandonly:
            # NOR devices: fetch the effaceable lockers (EMF/DKey material)
            if self.device_infos.has_key("lockers"):
                self.lockers = EffaceableLockers(self.device_infos.lockers.data)
            else:
                unit = self.findLockersUnit()
                if unit:
                    self.lockers = EffaceableLockers(unit[0x40:])
            self.lockers.display()
            if not self.device_infos.has_key("lockers"):
                self.device_infos.lockers = plistlib.Data(unit[0x40:0x40+960])
            EMF = self.getEMF(device_infos["key89B"].decode("hex"))
            dkey = self.getDKey(device_infos["key835"].decode("hex"))
            self.device_infos.EMF = EMF.encode("hex")
            self.device_infos.DKey = dkey.encode("hex")

        deviceuniqueinfo = sp0.get("DEVICEUNIQUEINFO")
        if not deviceuniqueinfo:
            print "DEVICEUNIQUEINFO not found"
        else:
            scfg = parse_SCFG(deviceuniqueinfo)
            print "Found DEVICEUNIQUEINFO, serial number=%s" % scfg.get("SrNm","SrNm not found !")

        if vfltype == '0':
            print "Using legacy VFL"
            self.vfl = VFL(self)
            self.ftl = FTL(self, self.vfl)
        elif not ppn:
            print "Using VSVFL"
            self.vfl = VSVFL(self)
            self.ftl = YAFTL(self.vfl)
|
||||
|
||||
    def initGeometry(self, d):
        """Derive all geometry attributes from the "nand" device-info dict *d*.

        Computes CE/block/page counts, dumped-record sizes, per-bank block
        address space (power-of-two padded when needed) and the bank mask.
        """
        self.metaSize = d.get("meta-per-logical-page", 0)
        if self.metaSize == 0:
            self.metaSize = 12
        dumpedPageSize = d.get("dumpedPageSize", d["#page-bytes"] + self.metaSize + 8)
        self.dump_size= d["#ce"] * d["#ce-blocks"] * d["#block-pages"] * dumpedPageSize
        self.totalPages = d["#ce"] * d["#ce-blocks"] * d["#block-pages"]
        nand_size = d["#ce"] * d["#ce-blocks"] * d["#block-pages"] * d["#page-bytes"]
        hsize = sizeof_fmt(nand_size)
        self.bfn = d.get("boot-from-nand", False)
        self.dumpedPageSize = dumpedPageSize
        self.pageSize = d["#page-bytes"]
        self.bootloaderBytes = d.get("#bootloader-bytes", 1536)
        self.emptyBootloaderPage = "\xFF" * self.bootloaderBytes
        self.blankPage = "\xFF" * self.pageSize
        self.nCEs =d["#ce"]
        self.blocksPerCE = d["#ce-blocks"]
        self.pagesPerBlock = d["#block-pages"]
        self.pagesPerCE = self.blocksPerCE * self.pagesPerBlock
        self.vendorType = d["vendor-type"]
        self.deviceReadId = d.get("device-readid", 0)
        self.banks_per_ce_vfl = d["banks-per-ce"]
        if d.has_key("metadata-whitening"):
            self.metadata_whitening = (d["metadata-whitening"].data == "\x01\x00\x00\x00")
        # physical banks per CE come from the chip-id lookup table
        if nand_chip_info.has_key(self.deviceReadId):
            self.banks_per_ce_physical = nand_chip_info.get(self.deviceReadId)[7]
        else:
            print "!!! Unknown deviceReadId %x, assuming 1 physical bank /CE, will probably fail" % self.deviceReadId
            self.banks_per_ce_physical = 1
        print "Chip id 0x%x banks per CE physical %d" % (self.deviceReadId, self.banks_per_ce_physical)
        self.blocks_per_bank = self.blocksPerCE / self.banks_per_ce_physical
        if self.blocksPerCE & (self.blocksPerCE-1) == 0:
            # power-of-two CE: banks tile the address space exactly
            self.bank_address_space = self.blocks_per_bank
            self.total_block_space = self.blocksPerCE
        else:
            # otherwise pad each bank's address range to a power of two
            bank_address_space = next_power_of_two(self.blocks_per_bank)
            self.bank_address_space = bank_address_space
            self.total_block_space = ((self.banks_per_ce_physical-1)*bank_address_space) + self.blocks_per_bank
        self.bank_mask = int(math.log(self.bank_address_space * self.pagesPerBlock,2))
        print "NAND geometry : %s (%d CEs (%d physical banks/CE) of %d blocks of %d pages of %d bytes data, %d bytes metdata)" % \
            (hsize, self.nCEs, self.banks_per_ce_physical, self.blocksPerCE, self.pagesPerBlock, self.pageSize, d["meta-per-logical-page"])
|
||||
|
||||
def unwhitenMetadata(self, meta, pagenum):
    """Undo H2FMI metadata whitening on a 12-byte spare blob.

    Returns the de-whitened 12 bytes, or None when meta is not 12 bytes.
    """
    if len(meta) != 12:
        return None
    table = NAND.H2FMI_HASH_TABLE
    tlen = len(table)
    words = []
    for idx, w in enumerate(struct.unpack("<LLL", meta)):
        # each 32-bit word is xored with a page-dependent table entry
        words.append(w ^ table[(idx + pagenum) % tlen])
    return struct.pack("<LLL", words[0], words[1], words[2])
|
||||
|
||||
def readBootPage(self, ce, page):
    # Read one bootloader page; only the first bootloaderBytes of the page
    # data are meaningful. Unreadable pages yield the all-0xFF filler page.
    spare, data = self.readPage(ce, page)
    if not data:
        #print "readBootPage %d %d failed" % (ce,page)
        return self.emptyBootloaderPage
    return data[:self.bootloaderBytes]
|
||||
|
||||
def readMetaPage(self, ce, block, page, spareType=SpareData):
    # Convenience wrapper: FTL/VFL metadata pages are always encrypted
    # with the hardcoded META_KEY.
    result = self.readBlockPage(ce, block, page, META_KEY, spareType=spareType)
    return result
|
||||
|
||||
def readBlockPage(self, ce, block, page, key=None, lpn=None, spareType=SpareData):
    # Translate (block, page-within-block) to an absolute page number on
    # this CE, then delegate to readPage.
    assert page < self.pagesPerBlock
    absolute = self.pagesPerBlock * block + page
    return self.readPage(ce, absolute, key, lpn, spareType=spareType)
|
||||
|
||||
def translateabsPage(self, page):
    """Map an absolute page number to (ce, page-within-ce).

    Pages are interleaved across CEs: the CE index is page mod nCEs.
    // makes the integer division explicit (identical result to Python 2's
    int /, and still correct if the module is ever run under Python 3).
    """
    return page % self.nCEs, page // self.nCEs
|
||||
|
||||
def readAbsPage(self, page, key=None, lpn=None):
    """Read an absolute page number (CE-interleaved addressing).

    Same (ce, page) mapping as translateabsPage; // keeps the integer
    division explicit (identical to Python 2's / on ints).
    """
    return self.readPage(page % self.nCEs, page // self.nCEs, key, lpn)
|
||||
|
||||
def readPage(self, ce, page, key=None, lpn=None, spareType=SpareData):
    # Read one physical page: undo the bank address-space "hax" for local
    # images, optionally un-whiten the 12-byte spare, parse it with
    # spareType, and decrypt the data when a key is given and the image is
    # encrypted. Returns (parsed_spare, data) or (None, None) on failure.
    if ce > self.nCEs or page > self.pagesPerCE:
        #hax physical banking
        # NOTE(review): bounds check looks like it should be >=, but the
        # branch is deliberately a no-op (raise commented out) to allow
        # physical-banking addresses past pagesPerCE.
        pass#raise Exception("CE %d Page %d out of bounds" % (ce, page))
    if self.filename != "remote": #undo banking hax
        # split the page number into (bank, page-in-bank) using bank_mask,
        # then rebuild a linear page number over blocks_per_bank
        bank = (page & ~((1 << self.bank_mask) - 1)) >> self.bank_mask
        page2 = (page & ((1 << self.bank_mask) - 1))
        page2 = bank * (self.blocks_per_bank) * self.pagesPerBlock + page2
        spare, data = self.image.readPage(ce, page2)
    else:
        spare, data = self.image.readPage(ce, page)
    if not data:
        return None,None
    # all-zero spare is left as-is; anything else gets de-whitened first
    if self.metadata_whitening and spare != "\x00"*12 and len(spare) == 12:
        spare = self.unwhitenMetadata(spare, page)
    spare = spareType.parse(spare)
    if key and self.encrypted:
        # NOTE(review): when lpn is passed the IV page number is taken from
        # the spare's lpn field instead (marked #XXX by the author) — verify
        # against callers before relying on it.
        if lpn != None: pageNum = spare.lpn #XXX
        else: pageNum = page
        return spare, self.decryptPage(data, key, pageNum)
    return spare, data
|
||||
|
||||
def decryptPage(self, data, key, pageNum):
    # AES-CBC decrypt one page; the IV is derived from the page number.
    iv = ivForPage(pageNum)
    return AESdecryptCBC(data, key, iv)
|
||||
|
||||
def unpackSpecialPage(self, data):
    # Special pages carry a little-endian payload length at offset 0x34,
    # immediately followed by the payload at offset 0x38.
    (length,) = struct.unpack("<L", data[0x34:0x38])
    payload = data[0x38:0x38 + length]
    return payload
|
||||
|
||||
def readSpecialPages(self, ce, magics):
    # Scan the last ~1% of blocks on CE ce (highest blocks first) for
    # "special" pages whose first 16 bytes match one of the given magic
    # strings (e.g. DEVICEUNIQUEINFO). Each page is tried both raw and
    # decrypted with META_KEY; the first match also settles self.encrypted.
    # Returns {magic_without_padding: payload}.
    print "Searching for special pages..."
    specials = {}
    if self.nandonly:
        magics.append("DEVICEUNIQUEINFO")#, "DIAGCONTROLINFO")
    # magics are stored NUL-padded to 16 bytes on flash
    magics = map(lambda s: s.ljust(16,"\x00"), magics)

    lowestBlock = self.blocksPerCE - (self.blocksPerCE / 100)
    for block in xrange(self.blocksPerCE - 1, lowestBlock, -1):
        if len(magics) == 0:
            break
        #hax for physical banking
        bank_offset = self.bank_address_space * (block / self.blocks_per_bank)
        # scan the block's pages from last to first
        for page in xrange(self.pagesPerBlock,-1,-1):
            page = (bank_offset + block % self.blocks_per_bank) * self.pagesPerBlock + page
            s, data = self.readPage(ce, page)
            if data == None:
                continue
            if data[:16] in magics:
                # readable in the clear => the dump is not encrypted
                self.encrypted = False
                magics.remove(data[:16])
                specials[data[:16].rstrip("\x00")] = self.unpackSpecialPage(data)
                break
            data = self.decryptPage(data, META_KEY, page)
            #print data[:16]
            if data[:16] in magics:
                #print data[:16], block, page
                # only matches after META_KEY decryption => encrypted dump
                self.encrypted = True
                magics.remove(data[:16])
                specials[data[:16].rstrip("\x00")] = self.unpackSpecialPage(data)
                break
    return specials
|
||||
|
||||
def readLPN(self, lpn, key):
    """Read logical page lpn through the FTL, decrypting with key."""
    ftl = self.ftl
    return ftl.readLPN(lpn, key)
|
||||
|
||||
def readVPN(self, vpn, key=None, lpn=None):
    """Read virtual page vpn through the VFL layer."""
    vfl = self.vfl
    return vfl.read_single_page(vpn, key, lpn)
|
||||
|
||||
def dumpSystemPartition(self, outputfilename):
    """Dump partition 0 (the system/root HFS partition) to a file."""
    bdev = self.getPartitionBlockDevice(0)
    return bdev.dumpToFile(outputfilename)
|
||||
|
||||
def dumpDataPartition(self, emf, outputfilename):
    """Dump partition 1 (the user data partition) to a file, using emf as key."""
    bdev = self.getPartitionBlockDevice(1, emf)
    return bdev.dumpToFile(outputfilename)
|
||||
|
||||
def isIOS5(self):
    """True when the partition layout is LwVM, i.e. an iOS 5 device."""
    # getPartitionTable() sets self.iosVersion as a side effect
    self.getPartitionTable()
    return self.iosVersion == 5
|
||||
|
||||
def getPartitionTable(self):
    # Locate and parse the partition table by probing the first logical
    # pages for MBR (iOS 3), GPT (iOS 4) or LwVM (iOS 5) layouts.
    # Caches the result in self.partition_table and records the detected
    # iOS version in self.iosVersion.
    if self.partition_table:
        return self.partition_table
    pt = None
    for i in xrange(10):
        d = self.readLPN(i, FILESYSTEM_KEY)
        pt = parse_mbr(d)
        if pt:
            self.hasMBR = True
            self.iosVersion = 3
            break
        gpt = parse_gpt(d)
        if gpt:
            # partition entries live at a fixed offset past the GPT header
            off = gpt.partition_entries_lba - gpt.current_lba
            d = self.readLPN(i+off, FILESYSTEM_KEY)
            # drop the all-zero terminator entry appended by RepeatUntil
            pt = GPT_partitions.parse(d)[:-1]
            self.iosVersion = 4
            break
        pt = parse_lwvm(d, self.pageSize)
        if pt:
            self.iosVersion = 5
            break
    self.partition_table = pt
    return pt
|
||||
|
||||
def getPartitionBlockDevice(self, partNum, key=None):
    # Build an FTLBlockDevice for partition partNum, picking the right
    # filesystem key when none is supplied: the CRPT blob key for iOS 3
    # MBR-encrypted data partitions, the hardcoded FILESYSTEM_KEY for the
    # system partition, or the EMF key from device_infos for the data
    # partition.
    pt = self.getPartitionTable()
    if self.hasMBR and pt[1].type == APPLE_ENCRYPTED and partNum == 1:
        # iOS 3.x: EMF key is wrapped in a CRPT structure in the last LBA
        data = self.readLPN(pt[1].last_lba - 1, FILESYSTEM_KEY)
        key = getEMFkeyFromCRPT(data, self.device_infos["key89B"].decode("hex"))
    if key == None:
        if partNum == 0:
            key = FILESYSTEM_KEY
        elif partNum == 1 and self.device_infos.has_key("EMF"):
            key = self.device_infos["EMF"].decode("hex")
    return FTLBlockDevice(self, pt[partNum].first_lba, pt[partNum].last_lba, key)
|
||||
|
||||
def getPartitionVolume(self, partNum, key=None):
    # Open partition partNum as a filesystem volume: plain HFS for the
    # system partition (0), EMF-aware HFS for the data partition (1).
    bdev = self.getPartitionBlockDevice(partNum, key)
    if partNum == 0:
        return HFSVolume(bdev)
    elif partNum == 1:
        # EMFVolume needs the data partition offset to derive per-file IVs
        self.device_infos["dataVolumeOffset"] = self.getPartitionTable()[partNum].first_lba
        return EMFVolume(bdev, self.device_infos)
|
||||
|
||||
def findLockersUnit(self):
|
||||
if not self.nandonly:
|
||||
return
|
||||
for i in xrange(96,128):
|
||||
for ce in xrange(self.nCEs):
|
||||
s, d = self.readBlockPage(ce, 1, i)
|
||||
if d and check_effaceable_header(d):
|
||||
print "Found effaceable lockers in ce %d block 1 page %d" % (ce,i)
|
||||
return d
|
||||
|
||||
def getLockers(self):
    # The lockers payload sits 0x40 bytes into the effaceable unit and is
    # 960 bytes long. Returns None when no unit was found.
    unit = self.findLockersUnit()
    if not unit:
        return
    return unit[0x40:0x40 + 960]
|
||||
|
||||
def getEMF(self, k89b):
    """Unwrap the EMF (filesystem) key from the lockers using key 0x89B."""
    lockers = self.lockers
    return lockers.get_EMF(k89b)
|
||||
|
||||
def getDKey(self, k835):
    """Unwrap the DKey (class D protection key) from the lockers using key 0x835."""
    lockers = self.lockers
    return lockers.get_DKey(k835)
|
||||
|
||||
def readBootPartition(self, block_start, block_end):
    """Concatenate the bootloader bytes of every page in blocks
    [block_start, block_end) of CE 0.

    Collects chunks in a list and joins once — the original string +=
    loop was accidentally quadratic over thousands of pages.
    """
    chunks = []
    for i in xrange(block_start*self.pagesPerBlock, block_end*self.pagesPerBlock):
        chunks.append(self.readBootPage(0, i))
    return "".join(chunks)
|
||||
|
||||
def get_img3s(self):
    # Collect the IMG3 firmware images from the NAND boot partition:
    # the IMG2 container in blocks 8-16 plus anything after offset 0xc00
    # of block 0 (LLB area). Returns a list of img3 objects, [] when the
    # images live in NOR instead.
    # NOTE(review): calls the module-level extract_img3s() helper, which
    # shares its name with the extract_img3s METHOD below — easy to confuse.
    if not self.nandonly:
        print "IMG3s are in NOR"
        return []
    blob = self.readBootPartition(8, 16)
    hdr = IMG2.parse(blob[:0x100])
    # image area location is given in blocks+offset inside the IMG2 header
    i = hdr.images_block * hdr.block_size + hdr.images_offset
    img3s = extract_img3s(blob[i:i+hdr.images_length*hdr.block_size])

    boot = self.readBootPartition(0, 1)
    img3s = extract_img3s(boot[0xc00:]) + img3s
    return img3s
|
||||
|
||||
def extract_img3s(self, outfolder=None):
    # Write every IMG3 found on NAND (plus the kernelcache from the system
    # partition) into outfolder, defaulting to an "img3" directory next to
    # the dump file (or ./img3 for remote devices).
    if not self.nandonly:
        print "IMG3s are in NOR"
        return
    if outfolder == None:
        if self.filename != "remote": outfolder = os.path.join(os.path.dirname(self.filename), "img3")
        else: outfolder = os.path.join(".", "img3")
    makedirs(outfolder)
    print "Extracting IMG3s to %s" % outfolder
    for img3 in self.get_img3s():
        #print img3.sigcheck(self.device_infos.get("key89A").decode("hex"))
        print img3.shortname
        write_file(outfolder+ "/%s.img3" % img3.shortname, img3.img3)
    # kernelcache lives on the system HFS partition, not in the boot area
    kernel = self.getPartitionVolume(0).readFile("/System/Library/Caches/com.apple.kernelcaches/kernelcache",returnString=True)
    if kernel:
        print "kernel"
        write_file(outfolder + "/kernelcache.img3", kernel)
|
||||
|
||||
def extract_shsh(self, outfolder="."):
|
||||
if not self.nandonly:
|
||||
print "IMG3s are in NOR"
|
||||
return
|
||||
pass
|
||||
|
||||
def getNVRAM(self):
|
||||
if not self.nandonly:
|
||||
print "NVRAM is in NOR"
|
||||
return
|
||||
#TODO
|
||||
nvrm = self.readBootPartition(2, 8)
|
||||
|
||||
def getBoot(self):
    # Print the boot partition directory: 16-byte entries between 0x400
    # and 0x600 of block 0, each holding a reversed 4-char name followed
    # by start block, end block and a flag word. A "none" entry ends the list.
    boot = self.readBootPartition(0, 1)
    for i in xrange(0x400, 0x600, 16):
        # names are stored byte-reversed on flash
        name = boot[i:i+4][::-1]
        block_start, block_end, flag = struct.unpack("<LLL", boot[i+4:i+16])
        if name == "none":
            break
        print name, block_start, block_end, flag
|
||||
|
||||
def cacheData(self, name, data):
    # Persist data as a pickle next to the dump file; remote devices have
    # no backing file, so nothing is cached.
    if self.filename == "remote":
        return None
    path = self.filename + "." + name
    save_pickle(path, data)
|
||||
|
||||
def loadCachedData(self, name):
    """Load a pickle previously written by cacheData, or None when missing,
    unreadable, or when reading from a remote device (no backing file)."""
    if self.filename == "remote":
        return None
    try:
        return load_pickle(self.filename + "." + name)
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed as a cache miss.
        return None
|
||||
|
||||
def dump(self, p):
    # Dump the whole NAND to file p via IOFlashStorageKit.
    #hax ioflashstoragekit can only handle 1 connexion
    # (so the remote proxy image must be torn down before dumping)
    if self.filename == "remote":
        del self.image
    ioflash = IOFlashStorageKitClient()
    ioflash.dump_nand(p)
    #restore proxy
    if self.filename == "remote":
        self.image = NANDRemote(self.pageSize, self.metaSize, self.pagesPerBlock, self.bfn)
|
||||
@@ -0,0 +1,132 @@
|
||||
from construct import *
|
||||
from zipfile import crc32
|
||||
|
||||
# GPT partition-type GUIDs used by iOS: system (HFS) and data (EMF) partitions.
GPT_HFS = "005346480000aa11aa1100306543ecac".decode("hex")
GPT_EMF = "00464d450000aa11aa1100306543ecac".decode("hex")

# One LwVM partition record: type/guid, byte range on the media, and a
# UTF-16LE name.
LWVM_partitionRecord = Struct("LWVM_partitionRecord",
    String("type", 16),
    String("guid", 16),
    ULInt64("begin"),
    ULInt64("end"),
    ULInt64("attribute"),
    String("name", 0x48, encoding="utf-16-le", padchar="\x00")
)

# LwVM (iOS 5 Lightweight Volume Manager) header: magic, media size, the
# partition records, and a chunk map translating media chunks to partitions.
LWVM_MAGIC = "6a9088cf8afd630ae351e24887e0b98b".decode("hex")
LWVM_header = Struct("LWVM_header",
    String("type",16),
    String("guid", 16),
    ULInt64("mediaSize"),
    ULInt32("numPartitions"),
    ULInt32("crc32"),
    Padding(464),
    Array(12, LWVM_partitionRecord),
    Array(1024, ULInt16("chunks"))
)

# Standard EFI GPT header (LBA ranges, entry array location, CRCs).
GPT_header = Struct("GPT_header",
    String("signature", 8),
    ULInt32("revision"),
    ULInt32("header_size"),
    SLInt32("crc"), #hax to match python signed crc
    ULInt32("zero"),
    ULInt64("current_lba"),
    ULInt64("backup_lba"),
    ULInt64("first_usable_lba"),
    ULInt64("last_usable_lba"),
    String("disk_guid", 16),
    ULInt64("partition_entries_lba"),
    ULInt32("num_partition_entries"),
    ULInt32("size_partition_entry"),
    ULInt32("crc_partition_entries")
)

# One GPT partition entry.
GPT_entry = Struct("GPT_entry",
    String("partition_type_guid", 16),
    String("partition_guid", 16),
    ULInt64("first_lba"),
    ULInt64("last_lba"),
    ULInt64("attributes"),
    String("name", 72, encoding="utf-16-le", padchar="\x00"),
)

# Entry array parser: stops at (and includes) the all-zero terminator entry.
GPT_partitions = RepeatUntil(lambda obj, ctx: obj["partition_type_guid"] == "\x00"*16, GPT_entry)

# MBR partition type used for Apple's encrypted data partition (iOS 3.x).
APPLE_ENCRYPTED = 0xAE
MBR_entry = Struct("MBR_entry",
    Byte("status"),
    Bytes("chs_start",3),
    Byte("type"),
    Bytes("chs_last",3),
    ULInt32("lba_start"),
    ULInt32("num_sectors")
)

# Classic MBR sector: 440 bytes of code, 4 partition entries, 0xAA55 magic.
MBR = Struct("MBR",
    String("code",440),
    ULInt32("signature"),
    ULInt16("zero"),
    Array(4, MBR_entry),
    OneOf(ULInt16("magic"), [0xAA55])
)
|
||||
|
||||
def parse_mbr(data):
|
||||
try:
|
||||
mbr = MBR.parse(data)
|
||||
if mbr.MBR_entry[0].type == 0xEE:
|
||||
print "Found protective MBR"
|
||||
return None
|
||||
res = mbr.MBR_entry[:2]
|
||||
for p in res:
|
||||
p.first_lba = p.lba_start
|
||||
p.last_lba = p.lba_start + p.num_sectors
|
||||
return res
|
||||
except:
|
||||
return None
|
||||
|
||||
def parse_gpt(data):
    # Parse and validate a GPT header; returns the header object or None
    # when the signature or CRC does not match.
    gpt = GPT_header.parse(data)
    if gpt.signature != "EFI PART":
        return None
    print "Found GPT header current_lba=%d partition_entries_lba=%d" % (gpt.current_lba, gpt.partition_entries_lba)
    # caller computes the entry offset as partition_entries_lba - current_lba
    # NOTE(review): assert is stripped under python -O; input validation
    # would normally raise instead.
    assert gpt.partition_entries_lba > gpt.current_lba
    # header CRC is computed with the crc field zeroed
    check = gpt.crc
    gpt.crc = 0
    actual = crc32(GPT_header.build(gpt))
    if actual != check:
        print "GPT crc check fail %d vs %d" % (actual, check)
        return None
    return gpt
|
||||
|
||||
def parse_lwvm(data, pageSize):
|
||||
try:
|
||||
hdr = LWVM_header.parse(data)
|
||||
if hdr.type != LWVM_MAGIC:
|
||||
print "LwVM magic mismatch"
|
||||
return
|
||||
tocheck = data[:44] + "\x00\x00\x00\x00" + data[48:0x1000]
|
||||
check = crc32(tocheck) & 0xffffffff
|
||||
if check != hdr.crc32:
|
||||
return None
|
||||
print "LwVM header CRC OK"
|
||||
partitions = hdr.LWVM_partitionRecord[:hdr.numPartitions]
|
||||
deviceSize=0
|
||||
#XXX: HAAAAAAAX
|
||||
for s in [8, 16, 32, 64, 128]:
|
||||
if hdr.mediaSize < (s* 1024*1024*1024):
|
||||
deviceSize = s
|
||||
break
|
||||
for i in xrange(len(hdr.chunks)):
|
||||
if hdr.chunks[i] == 0x0:
|
||||
lba0 = (i * deviceSize*1024*1024) / pageSize
|
||||
partitions[0].first_lba = lba0
|
||||
partitions[0].last_lba = lba0 + (partitions[0].end - partitions[0].begin) / pageSize
|
||||
elif hdr.chunks[i] == 0x1000:
|
||||
lbad = (i * deviceSize*1024*1024) / pageSize
|
||||
partitions[1].first_lba = lbad
|
||||
partitions[1].last_lba = lbad + (partitions[1].end - partitions[1].begin) / pageSize
|
||||
return partitions
|
||||
except:
|
||||
return None
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
from progressbar import ProgressBar
|
||||
from usbmux import usbmux
|
||||
from util import hexdump, sizeof_fmt
|
||||
import datetime
|
||||
import hashlib
|
||||
import struct
|
||||
import os
|
||||
|
||||
CMD_DUMP = 0
|
||||
CMD_PROXY = 1
|
||||
kIOFlashStorageOptionRawPageIO = 0x002
|
||||
kIOFlashStorageOptionBootPageIO = 0x100
|
||||
|
||||
class IOFlashStorageKitClient(object):
    # Client for the on-device ioflashstoragekit service, reached through
    # usbmuxd on TCP port 2000. Supports full NAND dumps (CMD_DUMP) and
    # per-page proxying (CMD_PROXY, see NANDRemote).
    def __init__(self, udid=None, host="localhost", port=2000):
        self.host = host
        self.port = port
        self.connect(udid)

    def connect(self, udid=None):
        # Wait for a device to appear on usbmux, then open a TCP connection
        # to the service port on the first device found.
        # NOTE(review): udid is accepted but never used to select a device.
        mux = usbmux.USBMux()
        mux.process(1.0)
        if not mux.devices:
            print "Waiting for iOS device"
            while not mux.devices:
                mux.process(1.0)
        if not mux.devices:
            print "No device found"
            return
        dev = mux.devices[0]
        try:
            self.s = mux.connect(dev, self.port)
        except:
            raise Exception("Connexion to device %s port %d failed" % (dev.serial, self.port))

    def send_command(self, cmd):
        # Commands are a single little-endian 32-bit opcode.
        return self.s.send(struct.pack("<L", cmd))

    def dump_nand(self, filename):
        # Stream the full NAND image into filename: the device first sends
        # the total size as a 64-bit LE integer, then the raw data. Progress
        # and a SHA1 of the received stream are reported.
        f = open(filename, "wb")
        self.send_command(CMD_DUMP)
        zz = self.s.recv(8)
        totalSize = struct.unpack("<Q", zz)[0]
        recvSize = 0
        print "Dumping %s NAND to %s" % (sizeof_fmt(totalSize), filename)
        pbar = ProgressBar(totalSize)
        pbar.start()
        h = hashlib.sha1()
        while recvSize < totalSize:
            pbar.update(recvSize)
            d = self.s.recv(8192*2)
            if not d or len(d) == 0:
                break
            h.update(d)
            f.write(d)
            recvSize += len(d)
        pbar.finish()
        f.close()
        print "NAND dump time : %s" % str(datetime.timedelta(seconds=pbar.seconds_elapsed))
        print "SHA1: %s" % h.hexdigest()
        if recvSize != totalSize:
            print "dump_nand FAIL"
|
||||
|
||||
class NANDRemote(object):
    # Page-level NAND access over the ioflashstoragekit proxy protocol:
    # each readPage sends (ce, page, spareSize, options) and receives
    # pageData + spareData + two 32-bit status words.
    def __init__(self, pageSize, spareSize, pagesPerBlock, bfn):
        self.spareSize = spareSize
        self.pageSize = pageSize
        self.pagesPerBlock = pagesPerBlock
        # bfn: device boots from NAND => first blocks need boot-page I/O
        self.bootFromNand = bfn
        self.client = IOFlashStorageKitClient()
        self.client.send_command(CMD_PROXY)

    def readPage(self, ce, page):
        # Returns (spareData, pageData), or (None, None) when the device
        # reports an uncorrectable/blank page (kIOReturn 0xe00002e5).
        options = 0
        spareSize = self.spareSize
        if self.bootFromNand and page < 16*self.pagesPerBlock:#XXX hardcoded for now
            # bootloader area uses a different page format without spare
            options = kIOFlashStorageOptionBootPageIO
            spareSize = 0
        d = struct.pack("<LLLL", ce, page, spareSize, options)

        self.client.s.send(d)

        # response: page data, spare, then 8 bytes of status
        torecv = self.pageSize+8+spareSize
        d = ""
        while len(d) != torecv:
            zz = self.client.s.recv(torecv)
            if not zz:
                break
            d += zz
        pageData = d[:self.pageSize]
        spareData = d[self.pageSize:self.pageSize+spareSize]
        r1,r2 = struct.unpack("<LL", d[self.pageSize+spareSize:self.pageSize+spareSize+8])

        if r1 == 0xe00002e5:
            return None, None
        #print ce, page, "%x" % r1, r2, pageData[:0x10].encode("hex"), spareData[:0x10].encode("hex")
        if spareData == "":
            # boot pages carry no spare: synthesize an erased one
            spareData = "\xFF" * self.spareSize
        return spareData, pageData
|
||||
@@ -0,0 +1,83 @@
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
|
||||
#hardcoded iOS keys
# META_KEY encrypts FTL/VFL metadata pages; FILESYSTEM_KEY encrypts the
# system partition and partition-table pages. Both are fixed across devices.
META_KEY = "92a742ab08c969bf006c9412d3cc79a5".decode("hex")
FILESYSTEM_KEY = "f65dae950e906c42b254cc58fc78eece".decode("hex")
|
||||
|
||||
def next_power_of_two(z):
    """Return the smallest power of two >= z (1 for z <= 1)."""
    p = 1
    while p < z:
        p *= 2
    return p
|
||||
|
||||
def CEIL_DIVIDE(val, amt):
    """Integer ceiling division: smallest integer >= val/amt (amt > 0).

    Uses // to make the integer (floor) division explicit — identical to
    Python 2's int / here, and still correct if run under Python 3.
    """
    return (val + amt - 1) // amt
|
||||
|
||||
#from openiboot/plat-s5l8920/h2fmi.c
#blocks_per_ce, pages_per_block, bytes_per_page, bytes_per_spare, unk5, unk6, unk7, banks_per_ce, unk9
#some values change in openiboot/plat-a4/h2fmi.c, but banks_per_ce is ok
# Keyed by the 32-bit NAND device-readid; only index 7 (banks_per_ce) is
# consumed by initGeometry in this tool.
nand_chip_info = {
	0x7294D7EC : [ 0x1038, 0x80, 0x2000, 0x1B4, 0xC, 0, 8, 1, 0 ],
	0x72D5DEEC : [ 0x2070, 0x80, 0x2000, 0x1B4, 0xC, 0, 8, 2, 0 ],
	0x29D5D7EC : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 2, 2, 0 ],
	0x2994D5EC : [ 0x1000, 0x80, 0x1000, 0xDA, 8, 0, 2, 1, 0 ],
	0xB614D5EC : [ 0x1000, 0x80, 0x1000, 0x80, 4, 0, 2, 1, 0 ],
	0xB655D7EC : [ 0x2000, 0x80, 0x1000, 0x80, 4, 0, 2, 2, 0 ],
	0xB614D5AD : [ 0x1000, 0x80, 0x1000, 0x80, 4, 0, 3, 1, 0 ],
	0x3294E798 : [ 0x1004, 0x80, 0x2000, 0x1C0, 0x10, 0, 1, 1, 0 ],
	0xBA94D598 : [ 0x1000, 0x80, 0x1000, 0xDA, 8, 0, 1, 1, 0 ],
	0xBA95D798 : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 1, 2, 0 ],
	0x3294D798 : [ 0x1034, 0x80, 0x2000, 0x178, 8, 0, 1, 1, 0 ],
	0x3295DE98 : [ 0x2068, 0x80, 0x2000, 0x178, 8, 0, 1, 2, 0 ],
	0x3295EE98 : [ 0x2008, 0x80, 0x2000, 0x1C0, 0x18, 0, 1, 2, 0 ],
	0x3E94D789 : [ 0x2000, 0x80, 0x1000, 0xDA, 0x10, 0, 5, 1, 0 ],
	0x3ED5D789 : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 6, 2, 0 ],
	0x3ED5D72C : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 5, 2, 0 ],
	0x3E94D72C : [ 0x2000, 0x80, 0x1000, 0xDA, 0xC, 0, 7, 1, 0 ],
	0x4604682C : [ 0x1000, 0x100, 0x1000, 0xE0, 0xC, 0, 7, 1, 0 ],
	0x3294D745 : [ 0x1000, 0x80, 0x2000, 0x178, 8, 0, 9, 1, 0 ],
	0x3295DE45 : [ 0x2000, 0x80, 0x2000, 0x178, 8, 0, 9, 2, 0 ],
	0x32944845 : [ 0x1000, 0x80, 0x2000, 0x1C0, 8, 0, 9, 1, 0 ],
	0x32956845 : [ 0x2000, 0x80, 0x2000, 0x1C0, 8, 0, 9, 2, 0 ],
}
|
||||
|
||||
#https://github.com/iDroid-Project/openiBoot/blob/master/openiboot/plat-a4/h2fmi.c
|
||||
def gen_h2fmi_hash_table():
    """Recreate the 256-entry H2FMI metadata-whitening table.

    Ported from openiBoot's h2fmi.c: a linear congruential generator is
    advanced 763 times per table entry (1 + 762 extra rounds, matching the
    original loop structure), masked to 32 bits.

    Uses range instead of the Python-2-only xrange (identical iteration
    behavior) so the function is portable; semicolons dropped.
    """
    val = 0x50F4546A
    table = [0]*256
    for i in range(256):
        val = ((0x19660D * val) + 0x3C6EF35F) & 0xffffffff
        for _ in range(762):
            val = ((0x19660D * val) + 0x3C6EF35F) & 0xffffffff
        table[i] = val & 0xffffffff
    return table
|
||||
|
||||
# Page types (as defined in the spare data "type" bitfield)
PAGETYPE_INDEX = 0x4 #Index block indicator
PAGETYPE_LBN = 0x10 # User data
PAGETYPE_FTL_CLEAN = 0x20 # FTL context (unmounted, clean)
PAGETYPE_VFL = 0x80 #/ VFL context

# 12-byte spare area layout used by the YAFTL/FTL layers:
# logical page number, update sequence number, then type/flag bytes.
SpareData = Struct("SpareData",
    ULInt32("lpn"),
    ULInt32("usn"),
    ULInt8("field_8"),
    ULInt8("type"),
    ULInt16("field_A")
)

# Block status (as defined in the BlockStruct structure)
BLOCKSTATUS_ALLOCATED = 0x1
BLOCKSTATUS_FTLCTRL = 0x2
BLOCKSTATUS_GC = 0x4
BLOCKSTATUS_CURRENT = 0x8
BLOCKSTATUS_FTLCTRL_SEL = 0x10
BLOCKSTATUS_I_GC = 0x20
BLOCKSTATUS_I_ALLOCATED = 0x40
BLOCKSTATUS_I_CURRENT = 0x80
BLOCKSTATUS_FREE = 0xFF

# FTL error codes (match openiBoot's definitions).
ERROR_ARG = 0x80000001
ERROR_NAND = 0x80000002
ERROR_EMPTY = 0x80000003
||||
188
dump-imessages/iphone-dataprotection/python_scripts/nand/vfl.py
Normal file
188
dump-imessages/iphone-dataprotection/python_scripts/nand/vfl.py
Normal file
@@ -0,0 +1,188 @@
|
||||
from array import array
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
from structs import next_power_of_two, CEIL_DIVIDE, PAGETYPE_VFL
|
||||
import struct
|
||||
|
||||
"""
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/plat-s5l8900/includes/s5l8900/ftl.h
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/plat-s5l8900/ftl.c
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/plat-s5l8900/nand.c
|
||||
|
||||
static const NANDDeviceType SupportedDevices[] = {
|
||||
"""
|
||||
# Legacy (S5L8900-era) NAND device table keyed by readid; index 8
# (userSuBlksTotal, e.g. 7744) is what the VFL below consumes.
SupportedDevices = {0x2555D5EC: [8192, 128, 4, 64, 4, 2, 4, 2, 7744, 4, 6],
                    0xB614D5EC: [4096, 128, 8, 128, 4, 2, 4, 2, 3872, 4, 6],
                    0xB655D7EC: [8192, 128, 8, 128, 4, 2, 4, 2, 7744, 4, 6],
                    0xA514D3AD: [4096, 128, 4, 64, 4, 2, 4, 2, 3872, 4, 6],
                    0xA555D5AD: [8192, 128, 4, 64, 4, 2, 4, 2, 7744, 4, 6],
                    0xB614D5AD: [4096, 128, 8, 128, 4, 2, 4, 2, 3872, 4, 6],
                    0xB655D7AD: [8192, 128, 8, 128, 4, 2, 4, 2, 7744, 4, 6],
                    0xA585D598: [8320, 128, 4, 64, 6, 2, 4, 2, 7744, 4, 6],
                    0xBA94D598: [4096, 128, 8, 216, 6, 2, 4, 2, 3872, 8, 8],
                    0xBA95D798: [8192, 128, 8, 216, 6, 2, 4, 2, 7744, 8, 8],
                    0x3ED5D789: [8192, 128, 8, 216, 4, 2, 4, 2, 7744, 8, 8],
                    0x3E94D589: [4096, 128, 8, 216, 4, 2, 4, 2, 3872, 8, 8],
                    0x3ED5D72C: [8192, 128, 8, 216, 4, 2, 4, 2, 7744, 8, 8],
                    0x3E94D52C: [4096, 128, 8, 216, 4, 2, 4, 2, 3872, 8, 8]
}
|
||||
|
||||
# Legacy VFL context page layout (see openiBoot ftl.h). The two trailing
# checksums cover everything before them (see vfl_check_checksum).
_vfl_vfl_context = Struct("_vfl_vfl_context",
    ULInt32("usn_inc"),
    Array(3, ULInt16("control_block")),
    ULInt16("unk1"),
    ULInt32("usn_dec"),
    ULInt16("active_context_block"),
    ULInt16("next_context_page"),
    ULInt16("unk2"),
    ULInt16("field_16"),
    ULInt16("field_18"),
    ULInt16("num_reserved_blocks"),
    ULInt16("reserved_block_pool_start"),
    ULInt16("total_reserved_blocks"),
    Array(820, ULInt16("reserved_block_pool_map")),
    Array(282, ULInt8("bad_block_table")),
    Array(4, ULInt16("vfl_context_block")),
    ULInt16("remapping_schedule_start"),
    Array(0x48, ULInt8("unk3")),
    ULInt32("version"),
    ULInt32("checksum1"),
    ULInt32("checksum2")
)
|
||||
|
||||
# VFL/VSVFL spare layout: the first 8 bytes are either user-page info
# (lpn + usn) or metadata-page info (usnDec + index), hence the Union.
_vfl_vsvfl_spare_data = Struct("_vfl_vsvfl_spare_data",
    Union("foo",
        Struct("user",ULInt32("logicalPageNumber"),ULInt32("usn")),
        Struct("meta",ULInt32("usnDec"),ULInt16("idx"), ULInt8("field_6"), ULInt8("field_7"))
    ),
    ULInt8("type2"),
    ULInt8("type1"),
    ULInt8("eccMark"),
    ULInt8("field_B"),
)
|
||||
|
||||
def vfl_checksum(data):
    """Return the (additive, xor) VFL context checksums of a byte buffer.

    The buffer is read as little-endian 32-bit words; the running sum and
    running xor are finalized with the 0xAABBCCDD constant.
    """
    acc_sum = 0
    acc_xor = 0
    for word in array("I", data):
        acc_sum = (acc_sum + word) & 0xffffffff
        acc_xor = (acc_xor ^ word) & 0xffffffff
    return (acc_sum + 0xAABBCCDD) & 0xffffffff, (acc_xor ^ 0xAABBCCDD) & 0xffffffff
|
||||
|
||||
def vfl_check_checksum(ctx, ctxtype):
    """True when ctx's stored checksums match those recomputed over its
    serialized body (the two trailing 4-byte checksum fields excluded)."""
    body = ctxtype.build(ctx)[:-8]
    expected1, expected2 = vfl_checksum(body)
    return ctx.checksum1 == expected1 and ctx.checksum2 == expected2
|
||||
|
||||
class VFL(object):
    # Legacy (S5L8900-era) Virtual Flash Layer: finds the most recent valid
    # VFL context per CE, then translates virtual page numbers to physical
    # (ce, page) addresses, remapping bad blocks through the reserved pool.
    def __init__(self, nand):
        self.nand = nand
        #XXX check
        self.banks_total = nand.nCEs * nand.banks_per_ce_physical
        self.num_ce = nand.nCEs
        self.banks_per_ce = nand.banks_per_ce_physical
        self.blocks_per_ce = nand.blocksPerCE
        self.pages_per_block = nand.pagesPerBlock
        self.pages_per_block_2 = next_power_of_two(self.pages_per_block)
        # a "super block" spans one block on every bank of every CE
        self.pages_per_sublk = self.pages_per_block * self.banks_per_ce * self.num_ce
        self.blocks_per_bank = self.blocks_per_ce / self.banks_per_ce
        self.blocks_per_bank_vfl = self.blocks_per_ce / self.banks_per_ce
        self.vendorType = nand.vendorType
        self.fs_start_block = 5

        #field_4 = 5;
        if not SupportedDevices.has_key(nand.deviceReadId):
            raise Exception("VFL: unsupported device 0x%x" % nand.deviceReadId)
        userSuBlksTotal = self.userSuBlksTotal = SupportedDevices[nand.deviceReadId][8]#7744
        userPagesTotal = userSuBlksTotal * self.pages_per_sublk
        suBlksTotal = self.blocks_per_ce

        # FTLData_field_4 is the superblock offset where user data starts
        # (mirrors openiBoot's FTL init arithmetic)
        FTLData_field_2 = suBlksTotal - userSuBlksTotal - 28
        print suBlksTotal, userSuBlksTotal, FTLData_field_2
        FTLData_field_4 = FTLData_field_2 + 5
        self.FTLData_field_4 = FTLData_field_4
        #FTLData_sysSuBlks = FTLData_field_2 + 4
        #FTLData_field_6 = 3
        #FTLData_field_8 = 23

        self.vflContexts = []
        self.bbt = []
        self.current_version = 0
        self.context = None
        reserved_blocks = 0
        fs_start_block = reserved_blocks+10 #XXX
        for ce in xrange(self.num_ce):
            # 1) find any valid context page in the first blocks of this CE
            # NOTE(review): unlike VSVFL, vflctx is not reset to None before
            # this loop — if no page parses on the first CE, the later
            # vflctx.vfl_context_block access would raise NameError.
            for b in xrange(reserved_blocks, fs_start_block):
                s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
                if not d:
                    continue
                vflctx = _vfl_vfl_context.parse(d)
                if not vfl_check_checksum(vflctx, _vfl_vfl_context):
                    vflctx = None
                    continue
                break
            # 2) among the blocks it advertises, pick the one with the
            # lowest (i.e. most recently decremented) usnDec
            MostRecentVFLCxtBlock = -1
            minUsn = 0xFFFFFFFF
            for b in vflctx.vfl_context_block:
                s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
                if not d:
                    continue
                if s.foo.meta.usnDec > 0 and s.foo.meta.usnDec <= minUsn:
                    minUsn = s.foo.meta.usnDec;
                    MostRecentVFLCxtBlock = b
            if MostRecentVFLCxtBlock == -1:
                print "MostRecentVFLCxtBlock == -1"
                return
            # 3) the last valid context page in that block is the current one
            last = None
            for pageNum in xrange(0, self.pages_per_block, 1):
                s,d = nand.readMetaPage(ce, MostRecentVFLCxtBlock, pageNum, _vfl_vsvfl_spare_data)
                if not d:
                    break
                vflctx = _vfl_vfl_context.parse(d)
                if vfl_check_checksum(vflctx, _vfl_vfl_context):
                    last = vflctx
            if not last:
                raise Exception("VFL open FAIL 1")
            self.vflContexts.append(last)
            # the globally current context is the one with the highest usn_inc
            if last.version == 1 and last.usn_inc >= self.current_version:
                self.current_version = last.usn_inc
                self.context = last
        if not self.context:
            raise Exception("VFL open FAIL")

        print "VFL context open OK"

    def VFL_get_FTLCtrlBlock(self):
        # Return the FTL control-block triple from the current context.
        for ctx in self.vflContexts:
            if ctx.usn_inc == self.current_version:
                return ctx.control_block

    def vfl_is_good_block(self, bbt, block):
        # Test the bad-block table bit for block (1 = good).
        # NOTE(review): index is divided by 8 twice (block/8 then index/8),
        # which differs from openiBoot's single division — verify against a
        # known-good dump before trusting remapping on legacy devices.
        if block > self.blocks_per_ce:
            raise Exception("vfl_is_good_block block %d out of bounds" % block)
        index = block/8
        return ((bbt[index / 8] >> (7 - (index % 8))) & 0x1) == 0x1

    def virtual_block_to_physical_block(self, ce, pBlock):
        # Good blocks map to themselves; bad blocks are looked up in the
        # reserved block pool of the CE's context.
        if self.vfl_is_good_block(self.vflContexts[ce].bad_block_table, pBlock):
            return pBlock
        ctx = self.vflContexts[ce]
        for pwDesPbn in xrange(0, ctx.num_reserved_blocks):
            if ctx.reserved_block_pool_map[pwDesPbn] == pBlock:
                if pwDesPbn > self.blocks_per_ce:
                    raise Exception("Destination physical block for remapping is greater than number of blocks per bank!")
                return ctx.reserved_block_pool_start + pwDesPbn
        print "Bad block %d not remapped" % pBlock
        return pBlock

    def virtual_page_number_to_virtual_address(self, vpn):
        # Split a virtual page number into (bank, block, page-in-block);
        # pages are interleaved across CEs.
        vbank = vpn % self.num_ce
        vblock = vpn / self.pages_per_sublk
        vpage = (vpn / self.num_ce) % self.pages_per_block
        return vbank, vblock, vpage

    def read_single_page(self, vpn, key=None, lpn=None):
        # Offset past the system superblocks, resolve bad-block remapping,
        # then read the physical page.
        vpn += self.pages_per_sublk * self.FTLData_field_4
        vbank, vblock, vpage = self.virtual_page_number_to_virtual_address(vpn)
        pblock = self.virtual_block_to_physical_block(vbank, vblock)
        #print "VFL read_single_page %d => %d, %d" % (vpn,ce,pPage)
        return self.nand.readPage(vbank, pblock*self.nand.pagesPerBlock + vpage, key, lpn)
|
||||
@@ -0,0 +1,193 @@
|
||||
from construct import *
|
||||
from structs import next_power_of_two, PAGETYPE_VFL, CEIL_DIVIDE
|
||||
from vfl import vfl_check_checksum, _vfl_vsvfl_spare_data
|
||||
|
||||
"""
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/vfl-vsvfl/vsvfl.c
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/vfl-vsvfl/includes/vfl/vsvfl.h
|
||||
"""
|
||||
|
||||
# VSVFL context page layout (see openiBoot vsvfl.h). As with the legacy
# context, the trailing checksum pair covers everything before it.
_vfl_vsvfl_context = Struct("_vfl_vsvfl_context",
    ULInt32("usn_inc"),
    ULInt32("usn_dec"),
    ULInt32("ftl_type"),
    ULInt16("usn_block"),
    ULInt16("usn_page"),
    ULInt16("active_context_block"),
    ULInt16("write_failure_count"),
    ULInt16("bad_block_count"),
    Array(4, ULInt8("replaced_block_count")),
    ULInt16("num_reserved_blocks"),
    ULInt16("field_1C"),
    ULInt16("total_reserved_blocks"),
    Array(6, ULInt8("field_20")),
    Array(820, ULInt16("reserved_block_pool_map")),
    Array(4, ULInt16("vfl_context_block")),
    ULInt16("usable_blocks_per_bank"),
    ULInt16("reserved_block_pool_start"),
    Array(3, ULInt16("control_block")),
    ULInt16("scrub_list_length"),
    Array(20, ULInt16("scrub_list")),
    Array(4, ULInt32("field_6CA")),
    ULInt32("vendor_type"),
    Array(204, ULInt8("field_6DE")),
    ULInt16("remapping_schedule_start"),
    Array(0x48, ULInt8("unk3")),
    ULInt32("version"),
    ULInt32("checksum1"),
    ULInt32("checksum2")
)
|
||||
|
||||
|
||||
class VSVFL(object):
    """Virtualized VFL (VSVFL) layer.

    Translates virtual page/block numbers into physical NAND addresses,
    applying per-vendor address swizzling, the on-NAND bad-block table
    and bad-block remapping.  Ported from openiBoot's vfl-vsvfl/vsvfl.c.
    """

    def __init__(self, nand):
        """Open the VSVFL: locate and parse the freshest VFL context for
        every CE, then build a per-CE bad-block table (bbt) from the
        reserved-block pool map.

        Raises Exception if no valid context can be found for a CE or if
        the vendor type is unsupported.
        """
        self.nand = nand
        # Some vendor types interleave two VFL banks per chip-enable.
        self.banks_per_ce_vfl = 1
        if self.nand.vendorType in [0x100010, 0x100014, 0x120014, 0x150011]:
            self.banks_per_ce_vfl = 2
        self.banks_total = nand.nCEs * self.banks_per_ce_vfl
        self.num_ce = nand.nCEs
        self.banks_per_ce = nand.banks_per_ce_physical
        self.blocks_per_ce = nand.blocksPerCE
        self.pages_per_block = nand.pagesPerBlock
        self.pages_per_block_2 = next_power_of_two(self.pages_per_block)
        # A "super block" spans one block in every bank of every CE.
        self.pages_per_sublk = self.pages_per_block * self.banks_per_ce_vfl * self.num_ce
        self.blocks_per_bank = self.blocks_per_ce / self.banks_per_ce
        self.blocks_per_bank_vfl = self.blocks_per_ce / self.banks_per_ce_vfl
        self.vendorType = nand.vendorType
        # Select the vendor-specific virtual->physical address swizzle.
        # NOTE(review): the pairing looks swapped relative to the method
        # names (0x150011 -> *_100014, 0x100010/0x100014/0x120014 ->
        # *_150011) — confirm against openiBoot before "fixing".
        if self.vendorType == 0x10001:
            self.virtual_to_physical = self.virtual_to_physical_10001
        elif self.vendorType == 0x150011:
            self.virtual_to_physical = self.virtual_to_physical_100014
        elif self.vendorType in [0x100010, 0x100014, 0x120014]:
            self.virtual_to_physical = self.virtual_to_physical_150011
        else:
            raise Exception("VSVFL: unsupported vendor 0x%x" % self.vendorType)
        self.bank_address_space = nand.bank_address_space
        self.vflContexts = []       # one parsed _vfl_vsvfl_context per CE
        self.bbt = []               # one bad-block bitmap (list of bytes) per CE
        self.current_version = 0    # highest usn_inc seen across contexts
        reserved_blocks = 0
        if self.nand.bfn:
            reserved_blocks = 16
        fs_start_block = reserved_blocks+16 #XXX
        for ce in xrange(self.num_ce):
            # Phase 1: scan the fixed area for any valid context, which
            # tells us where the real context copies live.
            vflctx = None
            for b in xrange(reserved_blocks, fs_start_block):
                s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
                if not d:
                    continue
                vflctx = _vfl_vsvfl_context.parse(d)
                if not vfl_check_checksum(vflctx, _vfl_vsvfl_context):
                    vflctx = None
                    continue
                break
            if not vflctx:
                raise Exception("Unable to find VSVFL context for CE %d" % ce)
            # Phase 2: among the advertised context blocks, pick the one
            # with the smallest positive decrementing USN (= most recent).
            MostRecentVFLCxtBlock = -1
            minUsn = 0xFFFFFFFF
            for b in vflctx.vfl_context_block:
                s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
                if not d or s.type1 != PAGETYPE_VFL:
                    continue
                if s.foo.meta.usnDec > 0 and s.foo.meta.usnDec <= minUsn:
                    minUsn = s.foo.meta.usnDec;
                    MostRecentVFLCxtBlock = b
            if MostRecentVFLCxtBlock == -1:
                print "MostRecentVFLCxtBlock == -1"
                return
            # Phase 3: within that block, the last readable VFL page holds
            # the newest context copy.
            last = None
            for pageNum in xrange(0, self.pages_per_block, 1):
                s,d = nand.readMetaPage(ce, MostRecentVFLCxtBlock, pageNum, _vfl_vsvfl_spare_data)
                if not d or s.type1 != PAGETYPE_VFL:
                    break
                last = d
            vflctx = _vfl_vsvfl_context.parse(last)
            if not vfl_check_checksum(vflctx, _vfl_vsvfl_context):
                print "VSVFL checksum FAIL"
            self.vflContexts.append(vflctx)
            if vflctx.version == 2 and vflctx.usn_inc >= self.current_version:
                self.current_version = vflctx.usn_inc
                self.context = vflctx
        # NOTE(review): self.context is only assigned inside the loop above;
        # if no context had version == 2 this raises AttributeError instead
        # of the intended Exception — confirm whether that can happen.
        if not self.context:
            raise Exception("VSVFL open FAIL")

        # Build the per-CE bad-block table: every entry in the reserved
        # block pool map marks a physical block that went bad (bit cleared
        # in the bitmap).  0xFFF0 means "slot unused".
        num_reserved = self.vflContexts[0].reserved_block_pool_start
        num_non_reserved = self.blocks_per_bank_vfl - num_reserved
        for ce in xrange(self.num_ce):
            bbt = [0xFF] * (CEIL_DIVIDE(self.blocks_per_ce, 8))
            ctx = self.vflContexts[ce]
            for bank in xrange(0, self.banks_per_ce_vfl):
                for i in xrange(0, num_non_reserved):
                    mapEntry = ctx.reserved_block_pool_map[bank*num_non_reserved + i]
                    if mapEntry == 0xFFF0:
                        continue
                    if mapEntry < self.blocks_per_ce:
                        pBlock = mapEntry
                    elif mapEntry > 0xFFF0:
                        pBlock = self.virtual_block_to_physical_block(ce + bank * self.num_ce, num_reserved + i)
                    else:
                        # NOTE(review): pBlock is left over from the previous
                        # iteration on this path — confirm intended.
                        print "VSVFL: bad map table"
                    bbt[pBlock / 8] &= ~(1 << (pBlock % 8))
            self.bbt.append(bbt)
        print "VSVFL context open OK"

    def VFL_get_FTLCtrlBlock(self):
        """Return the control_block array of the most recent context
        (the one whose usn_inc matches current_version), or None."""
        for ctx in self.vflContexts:
            if ctx.usn_inc == self.current_version:
                return ctx.control_block

    def virtual_to_physical_10001(self, vBank, vPage):
        """Identity mapping: vendor 0x10001 has no address swizzle."""
        return vBank, vPage

    def virtual_to_physical_100014(self, vBank, vPage):
        """Interleaved-bank swizzle: spread page bits above the in-block
        offset by a factor of two; odd physical banks shift by a block."""
        pBank = vBank / self.num_ce;
        pPage = ((self.pages_per_block - 1) & vPage) | (2 * (~(self.pages_per_block - 1) & vPage))
        if (pBank & 1):
            pPage |= self.pages_per_block
        return vBank % self.num_ce, pPage

    def virtual_to_physical_150011(self, vBank, vPage):
        """Two-plane swizzle: even/odd physical blocks alternate per bank.
        NOTE(review): hard-coded (vPage % 128) assumes 128 pages/block —
        confirm for this NAND geometry."""
        pBlock = 2 * (vPage / self.pages_per_block)
        if(vBank % (2 * self.num_ce) >= self.num_ce):
            pBlock += 1
        return vBank % self.num_ce, self.pages_per_block * pBlock | (vPage % 128)

    def virtual_block_to_physical_block(self, vBank, vBlock):
        """Translate a virtual block number by swizzling its first page
        and dividing back down to a block number."""
        ce, pPage = self.virtual_to_physical(vBank, self.pages_per_block * vBlock)
        return pPage / self.pages_per_block

    def vfl_is_good_block(self, bbt, block):
        """True if `block`'s bit is still set in the bad-block bitmap.
        NOTE(review): bound check uses '>' so block == blocks_per_ce slips
        through — likely should be '>=', confirm."""
        if block > self.blocks_per_ce:
            raise Exception("vfl_is_good_block block %d out of bounds" % block)
        return (bbt[block / 8] & (1 << (block % 8))) != 0

    def remap_block(self, ce, pBlock):
        """Return the replacement physical block for a bad block, or the
        block itself if it is good / no remap entry is found."""
        if self.vfl_is_good_block(self.bbt[ce], pBlock):
            return pBlock
        # Search the reserved block pool map for the entry pointing at the
        # bad block; its index determines the replacement's virtual address.
        ctx = self.vflContexts[ce]
        for pwDesPbn in xrange(0, self.blocks_per_ce - ctx.reserved_block_pool_start * self.banks_per_ce_vfl):
            if ctx.reserved_block_pool_map[pwDesPbn] == pBlock:
                vBank = ce + self.num_ce * (pwDesPbn / (self.blocks_per_bank_vfl - ctx.reserved_block_pool_start))
                vBlock = ctx.reserved_block_pool_start + (pwDesPbn % (self.blocks_per_bank_vfl - ctx.reserved_block_pool_start))
                z = self.virtual_block_to_physical_block(vBank, vBlock)
                #print "remapped block %d => %d" % (pBlock, z)
                return z
        print "Bad block %d not remapped" % pBlock
        return pBlock

    def virtual_page_number_to_physical(self, vpn):
        """Translate a virtual page number into (ce, physical page),
        applying the vendor swizzle and bad-block remapping."""
        vBank = vpn % self.banks_total           # pages are striped across banks
        ce = vBank % self.nand.nCEs

        pBlock = self.virtual_block_to_physical_block(vBank, vpn / self.pages_per_sublk)
        pBlock = self.remap_block(ce, pBlock)
        bank_offset = self.bank_address_space * (pBlock / self.blocks_per_bank)
        page = self.pages_per_block_2 * (bank_offset + (pBlock % self.blocks_per_bank)) \
            + ((vpn % self.pages_per_sublk) / self.banks_total)
        return ce, page

    def read_single_page(self, vpn, key=None, lpn=None):
        """Read one page by virtual page number; key/lpn are passed through
        to the NAND layer for decryption/whitening."""
        ce, pPage = self.virtual_page_number_to_physical(vpn)
        #print "VFL read_single_page %d => %d, %d" % (vpn,ce,pPage)
        return self.nand.readPage(ce, pPage, key, lpn)
|
||||
@@ -0,0 +1,357 @@
|
||||
from array import array
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
from progressbar import ProgressBar
|
||||
from structs import *
|
||||
import struct
|
||||
|
||||
|
||||
#https://github.com/iDroid-Project/openiBoot/blob/master/openiboot/ftl-yaftl/yaftl.c
|
||||
# On-NAND YaFTL context page ("CX01") layout, little-endian.
# Field names follow openiBoot's ftl-yaftl/yaftl.c — do not reorder.
YAFTL_CXT = Struct("YAFTL_CXT",
    String("version", 4),              # must be "CX01" (checked in YAFTL_readCxtInfo)
    ULInt32("unknCalculatedValue0"),
    ULInt32("totalPages"),
    ULInt32("latestUserBlock"),
    ULInt32("cxt_unkn0_usn"),
    ULInt32("latestIndexBlock"),
    ULInt32("maxIndexUsn"),
    ULInt32("blockStatsField4"),
    ULInt32("blockStatsField10"),
    ULInt32("numAllocatedBlocks"),
    ULInt32("numIAllocatedBlocks"),
    ULInt32("unk184_0xA"),
    Array(10, ULInt32("cxt_unkn1")),
    ULInt32("field_58"),
    ULInt16("tocArrayLength"),
    ULInt16("tocPagesPerBlock"),
    ULInt16("tocEntriesPerPage"),
    ULInt16("unkn_0x2A"),
    ULInt16("userPagesPerBlock"),
    ULInt16("unk64"),
    Array(11, ULInt32("cxt_unkn2")),
    ULInt8("unk188_0x63"),
)
|
||||
|
||||
# YaFTL TOC array entry (layout from openiBoot's yaftl.c).
TOCStruct = Struct("TOCStruct",
    ULInt32("indexPage"),          # page holding this slice of the lpn->vpn index
    ULInt16("cacheNum"),
    ULInt16("TOCUnkMember2"),
)
|
||||
|
||||
# YaFTL per-volume block statistics (layout from openiBoot's yaftl.c).
# Parsed for completeness; not interpreted by this reader.
BlockStats = Struct("BlockStats",
    ULInt32("numAllocated"),
    ULInt32("field_4"),
    ULInt32("numValidDPages"),
    ULInt32("numIAllocated"),
    ULInt32("field_10"),
    ULInt32("numValidIPages"),
    ULInt32("numFree"),
    ULInt32("field_1C"),
)
|
||||
|
||||
|
||||
class YAFTL(object):
    """YaFTL flash translation layer reader.

    Sits on top of a VFL object and resolves logical page numbers (LPNs)
    to virtual page numbers (VPNs), either from the on-NAND "CX01" context
    or — when that cannot be read — by scanning every block (restore).
    Also provides forensic "hax" scans that collect *all* historical
    lpn->vpn mappings, including superseded (deleted) pages.
    Ported from openiBoot's ftl-yaftl/yaftl.c.
    """

    def __init__(self, vfl, usn=0):
        """Open the FTL.  If `usn` is nonzero, ignore control blocks newer
        than that USN (used to open an older FTL snapshot).  Falls back to
        YAFTL_restore() whenever the context cannot be located/parsed."""
        self.vfl = vfl
        self.lpnToVpn = None        # restore-mode map; None when the on-NAND index is used
        bytesPerPage = vfl.nand.pageSize
        numBlocks = vfl.context.usable_blocks_per_bank
        self.blankPage = bytesPerPage * "\x00"
        self.numBlocks = numBlocks
        # Pages needed to hold one block's TOC (4 bytes per page entry),
        # rounded up.
        self.tocPagesPerBlock = vfl.pages_per_sublk * 4 / bytesPerPage
        if vfl.pages_per_sublk * 4 % bytesPerPage:
            self.tocPagesPerBlock += 1
        self.tocEntriesPerPage = bytesPerPage / 4
        self.tocArrayLength = CEIL_DIVIDE(vfl.pages_per_sublk * numBlocks * 4, bytesPerPage)
        # Page counts of the sections that precede the context page inside
        # an FTL control block; their sum gives ctrlBlockPageOffset.
        self.nPagesTocPageIndices = CEIL_DIVIDE(self.tocArrayLength * 4, bytesPerPage)
        self.nPagesBlockStatuses = CEIL_DIVIDE(numBlocks * 1, bytesPerPage)
        self.nPagesBlockReadCounts = CEIL_DIVIDE(numBlocks * 2, bytesPerPage)
        self.nPagesBlockEraseCounts = CEIL_DIVIDE(numBlocks * 4, bytesPerPage)
        self.nPagesBlockValidPagesDNumbers = self.nPagesBlockReadCounts
        self.nPagesBlockValidPagesINumbers = self.nPagesBlockReadCounts
        self.ctrlBlockPageOffset = self.nPagesTocPageIndices \
            + self.nPagesBlockStatuses \
            + self.nPagesBlockReadCounts \
            + self.nPagesBlockEraseCounts \
            + self.nPagesBlockValidPagesDNumbers \
            + self.nPagesBlockValidPagesINumbers \
            + 2 * self.tocPagesPerBlock \
            + 2
        self.totalPages = (self.numBlocks - 8) * (self.vfl.pages_per_sublk - self.tocPagesPerBlock)# - unknCalculatedValue0
        self.userPagesPerBlock = self.vfl.pages_per_sublk - self.tocPagesPerBlock
        # Pick the FTL control block with the highest spare USN (optionally
        # capped at the caller-supplied usn).
        maxUsn = 0
        ftlCtrlBlock = -1
        for b in self.vfl.VFL_get_FTLCtrlBlock():
            s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk)
            if not d:
                continue
            if usn and s.usn > usn:
                break
            if s.usn > maxUsn:
                maxUsn = s.usn
                ftlCtrlBlock = b
        if ftlCtrlBlock == -1 or not maxUsn:
            print "ftlCtrlBlock not found, restore needed"
            self.YAFTL_restore()
            return
        # Walk the control block in ctrlBlockPageOffset+1 strides; the
        # first blank slot marks where the newest context copy ends.
        i = 0
        maxUsn = 0
        while i < self.vfl.pages_per_sublk - self.ctrlBlockPageOffset:
            s,d = self.YAFTL_readPage(ftlCtrlBlock*self.vfl.pages_per_sublk + i + self.ctrlBlockPageOffset)
            if not d:
                if self.YAFTL_readCxtInfo(ftlCtrlBlock*self.vfl.pages_per_sublk + i):
                    return
                print "YaFTL_readCxtInfo FAIL, restore needed maxUsn=%d" % maxUsn
                self.YAFTL_restore()
                return
            if s and s.usn > maxUsn:
                maxUsn = s.usn
            i += self.ctrlBlockPageOffset + 1
        print "YaFTL open fail"
        self.YAFTL_restore()

    def readBTOCPages(self, block, maxVal):
        """Read a block's table of contents (stored in the last
        tocPagesPerBlock pages of the block) as an array of LPNs.
        Entries above maxVal are normalized to 0xFFFFFFFF (invalid).
        Returns None if any TOC page is unreadable."""
        data = ""
        for i in xrange(self.tocPagesPerBlock):
            s,d = self.YAFTL_readPage((block+1) * self.vfl.pages_per_sublk - self.tocPagesPerBlock + i)
            if not s:
                return None
            data += d
        btoc = array("I",data)
        for i in xrange(len(btoc)):
            if btoc[i] > maxVal:
                btoc[i] = 0xFFFFFFFF
        return btoc
|
||||
Reference in New Issue
Block a user