initial code for dumping imessages in a reasonable format
This commit is contained in:
parent
c0021efb13
commit
9dd7628f04
1
dump-imessages/.gitignore
vendored
Normal file
1
dump-imessages/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
*.tmp
|
7
dump-imessages/Makefile
Normal file
7
dump-imessages/Makefile
Normal file
@ -0,0 +1,7 @@
|
||||
default: dump
|
||||
|
||||
dump:
|
||||
./dump.sh
|
||||
|
||||
clean:
|
||||
rm -rf *.tmp
|
5
dump-imessages/README.md
Normal file
5
dump-imessages/README.md
Normal file
@ -0,0 +1,5 @@
|
||||
# notes
|
||||
|
||||
* iphone-dataprotection cloned from
|
||||
https://code.google.com/p/iphone-dataprotection/ on 20140207
|
||||
|
39
dump-imessages/dump.sh
Executable file
39
dump-imessages/dump.sh
Executable file
@ -0,0 +1,39 @@
|
||||
#!/bin/bash
|
||||
|
||||
BACKUPS=""
|
||||
|
||||
for B in ls ~/Library/Application\ Support/MobileSync/Backup/* ; do
|
||||
BACKUPS+=" $(basename "$B")"
|
||||
done
|
||||
|
||||
if [ -r ${HOME}/Documents/Secure/iphone-backup-password.sh ]; then
|
||||
source "${HOME}/Documents/Secure/iphone-backup-password.sh"
|
||||
fi
|
||||
|
||||
# expecting $IPHONE_BACKUP_PASSWORD to be set now. set it in your
|
||||
# environment if not, or put a script exporting it at that path above
|
||||
|
||||
echo $BACKUPS
|
||||
|
||||
WORKDIR="$TMPDIR/iphone-sms-dump.workd"
|
||||
if [ ! -d "$WORKDIR" ]; then
|
||||
mkdir -p "$WORKDIR"
|
||||
fi
|
||||
|
||||
for BID in $BACKUPS ; do
|
||||
if [ ! -r $WORKDIR/sms-$BID.db ]; then
|
||||
TD="$(mktemp -d -t bdir)/out"
|
||||
echo -e "y\n$IPHONE_BACKUP_PASSWORD" |
|
||||
python ./iphone-dataprotection/python_scripts/backup_tool.py \
|
||||
"${HOME}/Library/Application Support/MobileSync/Backup/$BID" \
|
||||
"$TD" 2>&1 > /dev/null # hush
|
||||
echo "extracted to $TD"
|
||||
mv "$TD/HomeDomain/Library/SMS/sms.db" ./sms-$BID.db
|
||||
mv "$TD/MediaDomain/Library/SMS/Attachments" ./Attachments-$BID.d
|
||||
rm -rf "$TD"
|
||||
fi
|
||||
done
|
||||
|
||||
for BID in $BACKUPS ; do
|
||||
# now we process them...
|
||||
done
|
8
dump-imessages/iphone-dataprotection/.hgignore
Normal file
8
dump-imessages/iphone-dataprotection/.hgignore
Normal file
@ -0,0 +1,8 @@
|
||||
# use glob syntax.
|
||||
syntax: glob
|
||||
|
||||
*.o
|
||||
*.pyc
|
||||
*.dmg
|
||||
*.ipsw
|
||||
|
7
dump-imessages/iphone-dataprotection/CREDITS.txt
Normal file
7
dump-imessages/iphone-dataprotection/CREDITS.txt
Normal file
@ -0,0 +1,7 @@
|
||||
comex
|
||||
chronic dev team
|
||||
idroid/openiboot team
|
||||
iphone dev team
|
||||
Jonathan Zdziarski
|
||||
msftguy
|
||||
planetbeing
|
1
dump-imessages/iphone-dataprotection/README.txt
Normal file
1
dump-imessages/iphone-dataprotection/README.txt
Normal file
@ -0,0 +1 @@
|
||||
See http://code.google.com/p/iphone-dataprotection/wiki/README
|
81
dump-imessages/iphone-dataprotection/build_ramdisk.sh
Executable file
81
dump-imessages/iphone-dataprotection/build_ramdisk.sh
Executable file
@ -0,0 +1,81 @@
|
||||
#!/bin/bash
|
||||
|
||||
#set +e
|
||||
#set -o errexit
|
||||
|
||||
if [ $# -lt 4 ]
|
||||
then
|
||||
echo "Syntax: $0 IPSW RAMDISK KEY IV CUSTOMRAMDISK"
|
||||
echo "python_scripts/kernel_patcher.py can generate a shell script with the correct parameters"
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ ! -f ramdisk_tools/restored_external ]
|
||||
then
|
||||
echo "ramdisk_tools/restored_external not found, check compilation output for errors"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
IPSW=$1
|
||||
RAMDISK=$2
|
||||
KEY=$3
|
||||
IV=$4
|
||||
CUSTOMRAMDISK=$5
|
||||
if [ "$CUSTOMRAMDISK" == "" ]; then
|
||||
CUSTOMRAMDISK="myramdisk.dmg"
|
||||
fi
|
||||
IMG3FS="./img3fs/img3fs"
|
||||
IMG3MNT="/tmp/img3"
|
||||
|
||||
if [ ! -f $IMG3FS ]; then
|
||||
echo "img3fs is missing, install osxfuse and run make -C img3fs/"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
if [ ! -f ssh.tar.gz ]; then
|
||||
echo "Downloading ssh.tar.gz from googlecode"
|
||||
curl -O http://iphone-dataprotection.googlecode.com/files/ssh.tar.gz
|
||||
fi
|
||||
|
||||
unzip $IPSW $RAMDISK
|
||||
|
||||
if [ -d "/Volumes/ramdisk" ]; then
|
||||
hdiutil eject /Volumes/ramdisk
|
||||
umount $IMG3MNT
|
||||
fi
|
||||
|
||||
mkdir -p $IMG3MNT
|
||||
|
||||
$IMG3FS -key $KEY -iv $IV $IMG3MNT $RAMDISK
|
||||
|
||||
hdiutil attach -owners off $IMG3MNT/DATA.dmg
|
||||
|
||||
#remove baseband files to free space
|
||||
rm -rf /Volumes/ramdisk/usr/local/standalone/firmware/*
|
||||
rm -rf /Volumes/ramdisk/usr/share/progressui/
|
||||
#dont replace existing files, replacing launchctl on armv6 ramdisks makes it fail somehow
|
||||
tar -C /Volumes/ramdisk/ -xzkP < ssh.tar.gz
|
||||
rm /Volumes/ramdisk/bin/vdir
|
||||
rm /Volumes/ramdisk/bin/egrep
|
||||
rm /Volumes/ramdisk/bin/grep
|
||||
|
||||
#rm /Volumes/ramdisk/usr/local/bin/restored_external
|
||||
cp ramdisk_tools/restored_external /Volumes/ramdisk/usr/local/bin
|
||||
|
||||
cp ramdisk_tools/bruteforce ramdisk_tools/device_infos /Volumes/ramdisk/var/root
|
||||
cp ramdisk_tools/scripts/* /Volumes/ramdisk/var/root
|
||||
cp ramdisk_tools/ioflashstoragekit /Volumes/ramdisk/var/root
|
||||
|
||||
#if present, copy ssh public key to ramdisk
|
||||
if [ -f ~/.ssh/id_rsa.pub ]; then
|
||||
mkdir /Volumes/ramdisk/var/root/.ssh
|
||||
cp ~/.ssh/id_rsa.pub /Volumes/ramdisk/var/root/.ssh/authorized_keys
|
||||
fi
|
||||
|
||||
hdiutil eject /Volumes/ramdisk
|
||||
umount $IMG3MNT
|
||||
|
||||
mv $RAMDISK $CUSTOMRAMDISK
|
||||
|
||||
#echo "$CUSTOMRAMDISK created"
|
||||
|
89
dump-imessages/iphone-dataprotection/build_ramdisk_ios6.sh
Executable file
89
dump-imessages/iphone-dataprotection/build_ramdisk_ios6.sh
Executable file
@ -0,0 +1,89 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ! $(uname) == "Darwin" ]
|
||||
then
|
||||
echo "Script for Mac OS X only"
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ $# -lt 1 ]
|
||||
then
|
||||
echo "Syntax: $0 DECRYPTED_RAMDISK_DMG"
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ ! -f ssh.tar.gz ]
|
||||
then
|
||||
echo "Downloading ssh.tar.gz from googlecode"
|
||||
curl -O http://iphone-dataprotection.googlecode.com/files/ssh.tar.gz
|
||||
fi
|
||||
|
||||
if [ ! -f libncurses.5.dylib ]
|
||||
then
|
||||
echo "Downloading libncurses.5.dylib from googlecode"
|
||||
curl -O http://iphone-dataprotection.googlecode.com/files/libncurses.5.dylib
|
||||
fi
|
||||
|
||||
echo "Rebuilding ramdisk_tools"
|
||||
|
||||
./build_tools.sh
|
||||
|
||||
#compiling in a vmware shared folder can produce binaries filled with zeroes !
|
||||
if [ ! -f ramdisk_tools/restored_external ] || [ "$(file -b ramdisk_tools/restored_external)" == "data" ]
|
||||
then
|
||||
echo "ramdisk_tools/restored_external not found or invalid, check compilation output for errors"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
RAMDISK=$1
|
||||
|
||||
RD_SIZE=$(du -h $RAMDISK | cut -f 1)
|
||||
|
||||
if [ ! $RD_SIZE == "20M" ]
|
||||
then
|
||||
echo "resizing ramdisk..."
|
||||
echo "hdiutil will segfault if ramdisk was already resized, thats ok"
|
||||
hdiutil resize -size 20M $RAMDISK
|
||||
fi
|
||||
|
||||
if [ -d /Volumes/ramdisk ]
|
||||
then
|
||||
echo "Unmount /Volumes/ramdisk then try again"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
echo "Attaching ramdisk"
|
||||
|
||||
hdiutil attach $RAMDISK
|
||||
rm -rf /Volumes/ramdisk/usr/local/standalone/firmware/*
|
||||
rm -rf /Volumes/ramdisk/usr/share/progressui/
|
||||
|
||||
if [ ! -f /Volumes/ramdisk/sbin/sshd ]
|
||||
then
|
||||
echo "Unpacking ssh.tar.gz on ramdisk..."
|
||||
tar -C /Volumes/ramdisk/ -xzkP < ssh.tar.gz
|
||||
echo "^^ This tar error message is okay"
|
||||
fi
|
||||
|
||||
if [ ! -f /Volumes/ramdisk/usr/lib/libncurses.5.4.dylib ]
|
||||
then
|
||||
echo "Adding libncurses..."
|
||||
cp libncurses.5.dylib /Volumes/ramdisk/usr/lib/libncurses.5.4.dylib
|
||||
fi
|
||||
|
||||
echo "Adding/updating ramdisk_tools binaries on ramdisk..."
|
||||
cp ramdisk_tools/restored_external /Volumes/ramdisk/usr/local/bin/
|
||||
cp ramdisk_tools/bruteforce ramdisk_tools/device_infos /Volumes/ramdisk/var/root
|
||||
cp ramdisk_tools/scripts/* /Volumes/ramdisk/var/root
|
||||
|
||||
ls -laht /Volumes/ramdisk/usr/local/bin/
|
||||
|
||||
#if present, copy ssh public key to ramdisk
|
||||
if [ -f ~/.ssh/id_rsa.pub ] && [ ! -d /Volumes/ramdisk/var/root/.ssh ]
|
||||
then
|
||||
mkdir /Volumes/ramdisk/var/root/.ssh
|
||||
cp ~/.ssh/id_rsa.pub /Volumes/ramdisk/var/root/.ssh/authorized_keys
|
||||
chmod 0600 /Volumes/ramdisk/var/root/.ssh/authorized_keys
|
||||
fi
|
||||
|
||||
hdiutil eject /Volumes/ramdisk
|
53
dump-imessages/iphone-dataprotection/build_tools.sh
Executable file
53
dump-imessages/iphone-dataprotection/build_tools.sh
Executable file
@ -0,0 +1,53 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ! $(uname) == "Darwin" ]
|
||||
then
|
||||
echo "Script for Mac OS X only"
|
||||
exit
|
||||
fi
|
||||
|
||||
for VER in 5.0 5.1 6.0 6.1 7.0
|
||||
do
|
||||
if [ -d "/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$VER.sdk/" ];
|
||||
then
|
||||
SDKVER=$VER
|
||||
SDKPATH="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$VER.sdk/"
|
||||
echo "Found iOS SDK at $SDKPATH"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$SDKVER" == "" ]; then
|
||||
echo "iOS SDK not found"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
if [ ! -f "$SDKPATH/System/Library/Frameworks/IOKit.framework/Headers/IOKitLib.h" ]; then
|
||||
echo "IOKit headers missing"
|
||||
|
||||
$IOKIT_HDR_109="/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/IOKit.framework/Headers"
|
||||
|
||||
if [ -d $IOKIT_HDR_109 ]; then
|
||||
echo "Symlinking headers"
|
||||
sudo ln -s $IOKIT_HDR_109 "$SDKPATH/System/Library/Frameworks/IOKit.framework/Headers"
|
||||
sudo ln -s "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/usr/include/libkern/OSTypes.h" "$SDKPATH/usr/include/libkern/OSTypes.h"
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
if [ ! -f "$SDKPATH/System/Library/Frameworks/IOKit.framework/IOKit" ]; then
|
||||
echo "IOKit binary missing"
|
||||
|
||||
if [ -f "$SDKPATH/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit" ]; then
|
||||
echo "Creating IOKit symlink for iOS 7.0 SDK"
|
||||
sudo ln -s "$SDKPATH/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit" "$SDKPATH/System/Library/Frameworks/IOKit.framework/IOKit"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -f "$SDKPATH/System/Library/Frameworks/IOKit.framework/Headers/IOKitLib.h" ]; then
|
||||
if [ -f "$SDKPATH/System/Library/Frameworks/IOKit.framework/IOKit" ]; then
|
||||
export SDKVER
|
||||
make -C ramdisk_tools clean
|
||||
make -C ramdisk_tools
|
||||
fi
|
||||
fi
|
19
dump-imessages/iphone-dataprotection/dump_data_partition.sh
Executable file
19
dump-imessages/iphone-dataprotection/dump_data_partition.sh
Executable file
@ -0,0 +1,19 @@
|
||||
#!/bin/sh
|
||||
SSHOPTS="-p 2222 -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"
|
||||
|
||||
UDID=`ssh $SSHOPTS root@localhost "./device_infos udid"`
|
||||
|
||||
if [ "$UDID" == "" ]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
echo "Device UDID : $UDID"
|
||||
|
||||
mkdir -p $UDID
|
||||
|
||||
DATE=`date +"%Y%m%d-%H%M"`
|
||||
OUT=$UDID/data_$DATE.dmg
|
||||
|
||||
echo "Dumping data partition in $OUT ..."
|
||||
|
||||
ssh $SSHOPTS root@localhost "dd if=/dev/rdisk0s2s1 bs=8192 || dd if=/dev/rdisk0s1s2 bs=8192" > $OUT
|
92
dump-imessages/iphone-dataprotection/emf_decrypter/BUILD
Normal file
92
dump-imessages/iphone-dataprotection/emf_decrypter/BUILD
Normal file
@ -0,0 +1,92 @@
|
||||
INSTRUCTIONS FOR BUILDING XPWN
|
||||
------------------------------
|
||||
|
||||
These are very basic instructions on how to build xpwn related projects, they
|
||||
are tailored to Debian based systems. They are not meant to be a substitute
|
||||
for experience in programming in GNU/Linux environments, but it should be a
|
||||
good starting point.
|
||||
|
||||
1. Install a basic build environment (compilers, etc.):
|
||||
|
||||
sudo apt-get install build-essential
|
||||
|
||||
2. Install some prerequisites libraries required by xpwn:
|
||||
|
||||
sudo apt-get install libcrypt-dev libz-dev libbz2-dev3 libusb-dev
|
||||
|
||||
3. Install cmake. It is recommended you download and build it from the
|
||||
official cmake website, since versions >= 2.6.0 are recommended.
|
||||
|
||||
wget http://www.cmake.org/files/v2.6/cmake-2.6.2.tar.gz
|
||||
tar zxvf cmake-2.6.2.tar.gz
|
||||
cd cmake-2.6.2
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
|
||||
Now you are ready to build xpwn. It is highly recommended that you build
|
||||
out-of-source (that is, the build products are not placed into the same
|
||||
folders as the sources). This is much neater and cleaning up is as simple as
|
||||
deleting the build products folder.
|
||||
|
||||
Assuming xpwn sources are in ~/xpwn:
|
||||
|
||||
4. Create a build folder
|
||||
|
||||
cd ~
|
||||
mkdir build
|
||||
cd build
|
||||
|
||||
5. Create Makefiles
|
||||
|
||||
cmake ~/xpwn
|
||||
|
||||
6. Build
|
||||
|
||||
make
|
||||
|
||||
7. Package
|
||||
|
||||
make package
|
||||
|
||||
BUILDING USEFUL LIBRARIES
|
||||
-------------------------
|
||||
|
||||
These command-lines can be substituted in for step 6. The products are in the
|
||||
subfolders (make package will not include them).
|
||||
|
||||
xpwn library (for IPSW generation)
|
||||
|
||||
make libXPwn.a
|
||||
|
||||
Windows pwnmetheus library (for QuickPwn)
|
||||
|
||||
make libpwnmetheus.dll
|
||||
|
||||
HELPFUL MAKEFILE GENERATION COMMAND-LINES
|
||||
-----------------------------------------
|
||||
|
||||
These command-lines can be substituted in for step 5.
|
||||
|
||||
Add debugging symbols:
|
||||
|
||||
cmake ~/xpwn -DCMAKE_C_FLAGS=-g
|
||||
|
||||
Try to only use static libraries:
|
||||
|
||||
cmake ~/xpwn -DBUILD_STATIC=YES
|
||||
|
||||
|
||||
CROSS-COMPILING
|
||||
---------------
|
||||
|
||||
This is a complex and advanced topic, but it is possible with the appropriate
|
||||
CMake toolchain files and properly configured build environment. I have
|
||||
crossbuilt Windows, OS X, Linux x86, Linux x64, and iPhone binaries from one
|
||||
Ubuntu machine. The source trees are properly configured for this task.
|
||||
|
||||
MORE HELP
|
||||
---------
|
||||
|
||||
Consult the CMake documentation and wiki and look in the CMakeLists.txt files
|
||||
for hints on how things are supposed to work.
|
@ -0,0 +1,29 @@
|
||||
cmake_minimum_required(VERSION 2.6)
|
||||
set(CMAKE_LEGACY_CYGWIN_WIN32 0)
|
||||
|
||||
project (XPwn)
|
||||
|
||||
# We want win32 executables to build staticly by default, since it's more difficult to keep the shared libraries straight on Windows
|
||||
IF(WIN32)
|
||||
SET(BUILD_STATIC ON CACHE BOOL "Force compilation with static libraries")
|
||||
ELSE(WIN32)
|
||||
SET(BUILD_STATIC OFF CACHE BOOL "Force compilation with static libraries")
|
||||
ENDIF(WIN32)
|
||||
|
||||
IF(BUILD_STATIC)
|
||||
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
ENDIF(BUILD_STATIC)
|
||||
|
||||
include_directories (${PROJECT_SOURCE_DIR}/includes)
|
||||
|
||||
add_subdirectory (common)
|
||||
add_subdirectory (hfs)
|
||||
add_subdirectory (emf)
|
||||
|
||||
IF(WIN32 OR APPLE)
|
||||
SET(CPACK_GENERATOR "ZIP")
|
||||
ELSE(WIN32 OR APPLE)
|
||||
SET(CPACK_GENERATOR "TBZ2")
|
||||
ENDIF(WIN32 OR APPLE)
|
||||
|
||||
INCLUDE(CPack)
|
674
dump-imessages/iphone-dataprotection/emf_decrypter/LICENSE
Normal file
674
dump-imessages/iphone-dataprotection/emf_decrypter/LICENSE
Normal file
@ -0,0 +1,674 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
@ -0,0 +1,13 @@
|
||||
DEPRECATED: use the python version instead
|
||||
|
||||
Decrypts files data forks in raw iOS 4 disk images.
|
||||
Reads encryption keys from plist file named after the volume ID.
|
||||
The plist file must have at least the EMF and DKey fields set.
|
||||
For now the tool decrypts the data forks but does not mark the files as
|
||||
"decrypted" : running it twice on the same image will produce garbage.
|
||||
Interrupting the process will also leave the image "half decrypted".
|
||||
|
||||
Uses planetbeing/dev team HFS implementation
|
||||
https://github.com/planetbeing/xpwn
|
||||
|
||||
Only builds on Mac OS X, requires CoreFoundation for plist access.
|
@ -0,0 +1,2 @@
|
||||
add_library(common abstractfile.c base64.c)
|
||||
|
@ -0,0 +1,311 @@
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "abstractfile.h"
|
||||
#include "common.h"
|
||||
|
||||
size_t freadWrapper(AbstractFile* file, void* data, size_t len) {
|
||||
return fread(data, 1, len, (FILE*) (file->data));
|
||||
}
|
||||
|
||||
size_t fwriteWrapper(AbstractFile* file, const void* data, size_t len) {
|
||||
return fwrite(data, 1, len, (FILE*) (file->data));
|
||||
}
|
||||
|
||||
int fseekWrapper(AbstractFile* file, off_t offset) {
|
||||
return fseeko((FILE*) (file->data), offset, SEEK_SET);
|
||||
}
|
||||
|
||||
off_t ftellWrapper(AbstractFile* file) {
|
||||
return ftello((FILE*) (file->data));
|
||||
}
|
||||
|
||||
void fcloseWrapper(AbstractFile* file) {
|
||||
fclose((FILE*) (file->data));
|
||||
free(file);
|
||||
}
|
||||
|
||||
off_t fileGetLength(AbstractFile* file) {
|
||||
off_t length;
|
||||
off_t pos;
|
||||
|
||||
pos = ftello((FILE*) (file->data));
|
||||
|
||||
fseeko((FILE*) (file->data), 0, SEEK_END);
|
||||
length = ftello((FILE*) (file->data));
|
||||
|
||||
fseeko((FILE*) (file->data), pos, SEEK_SET);
|
||||
|
||||
return length;
|
||||
}
|
||||
|
||||
AbstractFile* createAbstractFileFromFile(FILE* file) {
|
||||
AbstractFile* toReturn;
|
||||
|
||||
if(file == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
toReturn = (AbstractFile*) malloc(sizeof(AbstractFile));
|
||||
toReturn->data = file;
|
||||
toReturn->read = freadWrapper;
|
||||
toReturn->write = fwriteWrapper;
|
||||
toReturn->seek = fseekWrapper;
|
||||
toReturn->tell = ftellWrapper;
|
||||
toReturn->getLength = fileGetLength;
|
||||
toReturn->close = fcloseWrapper;
|
||||
toReturn->type = AbstractFileTypeFile;
|
||||
return toReturn;
|
||||
}
|
||||
|
||||
size_t dummyRead(AbstractFile* file, void* data, size_t len) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t dummyWrite(AbstractFile* file, const void* data, size_t len) {
|
||||
*((off_t*) (file->data)) += len;
|
||||
return len;
|
||||
}
|
||||
|
||||
int dummySeek(AbstractFile* file, off_t offset) {
|
||||
*((off_t*) (file->data)) = offset;
|
||||
return 0;
|
||||
}
|
||||
|
||||
off_t dummyTell(AbstractFile* file) {
|
||||
return *((off_t*) (file->data));
|
||||
}
|
||||
|
||||
void dummyClose(AbstractFile* file) {
|
||||
free(file);
|
||||
}
|
||||
|
||||
AbstractFile* createAbstractFileFromDummy() {
|
||||
AbstractFile* toReturn;
|
||||
toReturn = (AbstractFile*) malloc(sizeof(AbstractFile));
|
||||
toReturn->data = NULL;
|
||||
toReturn->read = dummyRead;
|
||||
toReturn->write = dummyWrite;
|
||||
toReturn->seek = dummySeek;
|
||||
toReturn->tell = dummyTell;
|
||||
toReturn->getLength = NULL;
|
||||
toReturn->close = dummyClose;
|
||||
toReturn->type = AbstractFileTypeDummy;
|
||||
return toReturn;
|
||||
}
|
||||
|
||||
size_t memRead(AbstractFile* file, void* data, size_t len) {
|
||||
MemWrapperInfo* info = (MemWrapperInfo*) (file->data);
|
||||
if(info->bufferSize < (info->offset + len)) {
|
||||
len = info->bufferSize - info->offset;
|
||||
}
|
||||
memcpy(data, (void*)((uint8_t*)(*(info->buffer)) + (uint32_t)info->offset), len);
|
||||
info->offset += (size_t)len;
|
||||
return len;
|
||||
}
|
||||
|
||||
size_t memWrite(AbstractFile* file, const void* data, size_t len) {
|
||||
MemWrapperInfo* info = (MemWrapperInfo*) (file->data);
|
||||
|
||||
while((info->offset + (size_t)len) > info->bufferSize) {
|
||||
info->bufferSize <<= 1;
|
||||
*(info->buffer) = realloc(*(info->buffer), info->bufferSize);
|
||||
}
|
||||
|
||||
memcpy((void*)((uint8_t*)(*(info->buffer)) + (uint32_t)info->offset), data, len);
|
||||
info->offset += (size_t)len;
|
||||
return len;
|
||||
}
|
||||
|
||||
int memSeek(AbstractFile* file, off_t offset) {
|
||||
MemWrapperInfo* info = (MemWrapperInfo*) (file->data);
|
||||
info->offset = (size_t)offset;
|
||||
return 0;
|
||||
}
|
||||
|
||||
off_t memTell(AbstractFile* file) {
|
||||
MemWrapperInfo* info = (MemWrapperInfo*) (file->data);
|
||||
return (off_t)info->offset;
|
||||
}
|
||||
|
||||
off_t memGetLength(AbstractFile* file) {
|
||||
MemWrapperInfo* info = (MemWrapperInfo*) (file->data);
|
||||
return info->bufferSize;
|
||||
}
|
||||
|
||||
void memClose(AbstractFile* file) {
|
||||
free(file->data);
|
||||
free(file);
|
||||
}
|
||||
|
||||
AbstractFile* createAbstractFileFromMemory(void** buffer, size_t size) {
|
||||
MemWrapperInfo* info;
|
||||
AbstractFile* toReturn;
|
||||
toReturn = (AbstractFile*) malloc(sizeof(AbstractFile));
|
||||
|
||||
info = (MemWrapperInfo*) malloc(sizeof(MemWrapperInfo));
|
||||
info->offset = 0;
|
||||
info->buffer = buffer;
|
||||
info->bufferSize = size;
|
||||
|
||||
toReturn->data = info;
|
||||
toReturn->read = memRead;
|
||||
toReturn->write = memWrite;
|
||||
toReturn->seek = memSeek;
|
||||
toReturn->tell = memTell;
|
||||
toReturn->getLength = memGetLength;
|
||||
toReturn->close = memClose;
|
||||
toReturn->type = AbstractFileTypeMem;
|
||||
return toReturn;
|
||||
}
|
||||
|
||||
void abstractFilePrint(AbstractFile* file, const char* format, ...) {
|
||||
va_list args;
|
||||
char buffer[1024];
|
||||
size_t length;
|
||||
|
||||
buffer[0] = '\0';
|
||||
va_start(args, format);
|
||||
length = vsprintf(buffer, format, args);
|
||||
va_end(args);
|
||||
ASSERT(file->write(file, buffer, length) == length, "fwrite");
|
||||
}
|
||||
|
||||
int absFileRead(io_func* io, off_t location, size_t size, void *buffer) {
|
||||
AbstractFile* file;
|
||||
file = (AbstractFile*) io->data;
|
||||
file->seek(file, location);
|
||||
if(file->read(file, buffer, size) == size) {
|
||||
return TRUE;
|
||||
} else {
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
int absFileWrite(io_func* io, off_t location, size_t size, void *buffer) {
|
||||
AbstractFile* file;
|
||||
file = (AbstractFile*) io->data;
|
||||
file->seek(file, location);
|
||||
if(file->write(file, buffer, size) == size) {
|
||||
return TRUE;
|
||||
} else {
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
void closeAbsFile(io_func* io) {
|
||||
AbstractFile* file;
|
||||
file = (AbstractFile*) io->data;
|
||||
file->close(file);
|
||||
free(io);
|
||||
}
|
||||
|
||||
|
||||
io_func* IOFuncFromAbstractFile(AbstractFile* file) {
|
||||
io_func* io;
|
||||
|
||||
io = (io_func*) malloc(sizeof(io_func));
|
||||
io->data = file;
|
||||
io->read = &absFileRead;
|
||||
io->write = &absFileWrite;
|
||||
io->close = &closeAbsFile;
|
||||
|
||||
return io;
|
||||
}
|
||||
|
||||
size_t memFileRead(AbstractFile* file, void* data, size_t len) {
|
||||
MemFileWrapperInfo* info = (MemFileWrapperInfo*) (file->data);
|
||||
memcpy(data, (void*)((uint8_t*)(*(info->buffer)) + (uint32_t)info->offset), len);
|
||||
info->offset += (size_t)len;
|
||||
return len;
|
||||
}
|
||||
|
||||
size_t memFileWrite(AbstractFile* file, const void* data, size_t len) {
|
||||
MemFileWrapperInfo* info = (MemFileWrapperInfo*) (file->data);
|
||||
|
||||
while((info->offset + (size_t)len) > info->actualBufferSize) {
|
||||
info->actualBufferSize <<= 1;
|
||||
*(info->buffer) = realloc(*(info->buffer), info->actualBufferSize);
|
||||
}
|
||||
|
||||
if((info->offset + (size_t)len) > (*(info->bufferSize))) {
|
||||
memset(((uint8_t*)(*(info->buffer))) + *(info->bufferSize), 0, (info->offset + (size_t)len) - *(info->bufferSize));
|
||||
*(info->bufferSize) = info->offset + (size_t)len;
|
||||
}
|
||||
|
||||
memcpy((void*)((uint8_t*)(*(info->buffer)) + (uint32_t)info->offset), data, len);
|
||||
info->offset += (size_t)len;
|
||||
return len;
|
||||
}
|
||||
|
||||
int memFileSeek(AbstractFile* file, off_t offset) {
|
||||
MemFileWrapperInfo* info = (MemFileWrapperInfo*) (file->data);
|
||||
info->offset = (size_t)offset;
|
||||
return 0;
|
||||
}
|
||||
|
||||
off_t memFileTell(AbstractFile* file) {
|
||||
MemFileWrapperInfo* info = (MemFileWrapperInfo*) (file->data);
|
||||
return (off_t)info->offset;
|
||||
}
|
||||
|
||||
off_t memFileGetLength(AbstractFile* file) {
|
||||
MemFileWrapperInfo* info = (MemFileWrapperInfo*) (file->data);
|
||||
return *(info->bufferSize);
|
||||
}
|
||||
|
||||
void memFileClose(AbstractFile* file) {
|
||||
free(file->data);
|
||||
free(file);
|
||||
}
|
||||
|
||||
AbstractFile* createAbstractFileFromMemoryFile(void** buffer, size_t* size) {
|
||||
MemFileWrapperInfo* info;
|
||||
AbstractFile* toReturn;
|
||||
toReturn = (AbstractFile*) malloc(sizeof(AbstractFile));
|
||||
|
||||
info = (MemFileWrapperInfo*) malloc(sizeof(MemFileWrapperInfo));
|
||||
info->offset = 0;
|
||||
info->buffer = buffer;
|
||||
info->bufferSize = size;
|
||||
info->actualBufferSize = (1024 < (*size)) ? (*size) : 1024;
|
||||
if(info->actualBufferSize != *(info->bufferSize)) {
|
||||
*(info->buffer) = realloc(*(info->buffer), info->actualBufferSize);
|
||||
}
|
||||
|
||||
toReturn->data = info;
|
||||
toReturn->read = memFileRead;
|
||||
toReturn->write = memFileWrite;
|
||||
toReturn->seek = memFileSeek;
|
||||
toReturn->tell = memFileTell;
|
||||
toReturn->getLength = memFileGetLength;
|
||||
toReturn->close = memFileClose;
|
||||
toReturn->type = AbstractFileTypeMemFile;
|
||||
return toReturn;
|
||||
}
|
||||
|
||||
AbstractFile* createAbstractFileFromMemoryFileBuffer(void** buffer, size_t* size, size_t actualBufferSize) {
|
||||
MemFileWrapperInfo* info;
|
||||
AbstractFile* toReturn;
|
||||
toReturn = (AbstractFile*) malloc(sizeof(AbstractFile));
|
||||
|
||||
info = (MemFileWrapperInfo*) malloc(sizeof(MemFileWrapperInfo));
|
||||
info->offset = 0;
|
||||
info->buffer = buffer;
|
||||
info->bufferSize = size;
|
||||
info->actualBufferSize = actualBufferSize;
|
||||
|
||||
toReturn->data = info;
|
||||
toReturn->read = memFileRead;
|
||||
toReturn->write = memFileWrite;
|
||||
toReturn->seek = memFileSeek;
|
||||
toReturn->tell = memFileTell;
|
||||
toReturn->getLength = memFileGetLength;
|
||||
toReturn->close = memFileClose;
|
||||
toReturn->type = AbstractFileTypeMemFile;
|
||||
return toReturn;
|
||||
}
|
||||
|
@ -0,0 +1,183 @@
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <dmg/dmg.h>
|
||||
|
||||
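/*
 * Permissive base64 decoder: characters outside the base64 alphabet are
 * skipped, '=' padding characters are counted and the corresponding bytes are
 * trimmed from the end of the output.  The returned buffer is malloc'd and
 * grown by doubling; its final length is written to *dataLength.
 */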
unsigned char* decodeBase64(char* toDecode, size_t* dataLength) {
|
||||
uint8_t buffer[4];
|
||||
uint8_t charsInBuffer;
|
||||
unsigned char* curChar;
|
||||
unsigned char* decodeBuffer;
|
||||
unsigned int decodeLoc;
|
||||
unsigned int decodeBufferSize;
|
||||
uint8_t bytesToDrop;
|
||||
|
||||
curChar = (unsigned char*) toDecode;
|
||||
charsInBuffer = 0;
|
||||
|
||||
decodeBufferSize = 100;
|
||||
decodeLoc = 0;
|
||||
decodeBuffer = (unsigned char*) malloc(decodeBufferSize);
|
||||
|
||||
bytesToDrop = 0;
|
||||
|
||||
while((*curChar) != '\0') {
|
||||
if((*curChar) >= 'A' && (*curChar) <= 'Z') {
|
||||
buffer[charsInBuffer] = (*curChar) - 'A';
|
||||
charsInBuffer++;
|
||||
}
|
||||
|
||||
if((*curChar) >= 'a' && (*curChar) <= 'z') {
|
||||
buffer[charsInBuffer] = ((*curChar) - 'a') + ('Z' - 'A' + 1);
|
||||
charsInBuffer++;
|
||||
}
|
||||
|
||||
if((*curChar) >= '0' && (*curChar) <= '9') {
|
||||
buffer[charsInBuffer] = ((*curChar) - '0') + ('Z' - 'A' + 1) + ('z' - 'a' + 1);
|
||||
charsInBuffer++;
|
||||
}
|
||||
|
||||
if((*curChar) == '+') {
|
||||
buffer[charsInBuffer] = ('Z' - 'A' + 1) + ('z' - 'a' + 1) + ('9' - '0' + 1);
|
||||
charsInBuffer++;
|
||||
}
|
||||
|
||||
if((*curChar) == '/') {
|
||||
buffer[charsInBuffer] = ('Z' - 'A' + 1) + ('z' - 'a' + 1) + ('9' - '0' + 1) + 1;
|
||||
charsInBuffer++;
|
||||
}
|
||||
|
||||
if((*curChar) == '=') {
|
||||
bytesToDrop++;
|
||||
}
|
||||
|
||||
if(charsInBuffer == 4) {
|
||||
charsInBuffer = 0;
|
||||
|
||||
if((decodeLoc + 3) >= decodeBufferSize) {
|
||||
decodeBufferSize <<= 1;
|
||||
decodeBuffer = (unsigned char*) realloc(decodeBuffer, decodeBufferSize);
|
||||
}
|
||||
decodeBuffer[decodeLoc] = ((buffer[0] << 2) & 0xFC) + ((buffer[1] >> 4) & 0x3F);
|
||||
decodeBuffer[decodeLoc + 1] = ((buffer[1] << 4) & 0xF0) + ((buffer[2] >> 2) & 0x0F);
|
||||
decodeBuffer[decodeLoc + 2] = ((buffer[2] << 6) & 0xC0) + (buffer[3] & 0x3F);
|
||||
|
||||
decodeLoc += 3;
|
||||
buffer[0] = 0;
|
||||
buffer[1] = 0;
|
||||
buffer[2] = 0;
|
||||
buffer[3] = 0;
|
||||
}
|
||||
|
||||
curChar++;
|
||||
}
|
||||
|
||||
if(bytesToDrop != 0) {
|
||||
if((decodeLoc + 3) >= decodeBufferSize) {
|
||||
decodeBufferSize <<= 1;
|
||||
decodeBuffer = (unsigned char*) realloc(decodeBuffer, decodeBufferSize);
|
||||
}
|
||||
|
||||
decodeBuffer[decodeLoc] = ((buffer[0] << 2) & 0xFC) | ((buffer[1] >> 4) & 0x3F);
|
||||
|
||||
if(bytesToDrop <= 2)
|
||||
decodeBuffer[decodeLoc + 1] = ((buffer[1] << 4) & 0xF0) | ((buffer[2] >> 2) & 0x0F);
|
||||
|
||||
if(bytesToDrop <= 1)
|
||||
decodeBuffer[decodeLoc + 2] = ((buffer[2] << 6) & 0xC0) | (buffer[3] & 0x3F);
|
||||
|
||||
*dataLength = decodeLoc + 3 - bytesToDrop;
|
||||
} else {
|
||||
*dataLength = decodeLoc;
|
||||
}
|
||||
|
||||
return decodeBuffer;
|
||||
}
|
||||
|
||||
void writeBase64(AbstractFile* file, unsigned char* data, size_t dataLength, int tabLength, int width) {
|
||||
char* buffer;
|
||||
buffer = convertBase64(data, dataLength, tabLength, width);
|
||||
file->write(file, buffer, strlen(buffer));
|
||||
free(buffer);
|
||||
}
|
||||
|
||||
#define CHECK_BUFFER_SIZE() \
|
||||
if(pos == bufferSize) { \
|
||||
bufferSize <<= 1; \
|
||||
buffer = (unsigned char*) realloc(buffer, bufferSize); \
|
||||
}
|
||||
|
||||
#define CHECK_LINE_END_STRING() \
|
||||
CHECK_BUFFER_SIZE() \
|
||||
if(width == lineLength) { \
|
||||
buffer[pos++] = '\n'; \
|
||||
CHECK_BUFFER_SIZE() \
|
||||
for(j = 0; j < tabLength; j++) { \
|
||||
buffer[pos++] = '\t'; \
|
||||
CHECK_BUFFER_SIZE() \
|
||||
} \
|
||||
lineLength = 0; \
|
||||
} else { \
|
||||
lineLength++; \
|
||||
}
|
||||
|
||||
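/*
 * Base64 encoder: inserts a newline every `width` output characters and indents
 * every line (including the first) with `tabLength` tab characters, presumably
 * to match plist-style <data> blocks.  Returns a malloc'd, NUL-terminated
 * string ending in a newline.
 */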
char* convertBase64(unsigned char* data, size_t dataLength, int tabLength, int width) {
|
||||
const char* dictionary = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
|
||||
|
||||
unsigned char* buffer;
|
||||
size_t pos;
|
||||
size_t bufferSize;
|
||||
int i, j;
|
||||
int lineLength;
|
||||
|
||||
bufferSize = 100;
|
||||
buffer = (unsigned char*) malloc(bufferSize);
|
||||
pos = 0;
|
||||
lineLength = 0;
|
||||
|
||||
for(i = 0; i < tabLength; i++) {
|
||||
CHECK_BUFFER_SIZE()
|
||||
buffer[pos++] = '\t';
|
||||
}
|
||||
i = 0;
|
||||
while(dataLength >= 3) {
|
||||
dataLength -= 3;
|
||||
buffer[pos++] = dictionary[(data[i] >> 2) & 0x3F];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = dictionary[(((data[i] << 4) & 0x30) | ((data[i+1] >> 4) & 0x0F)) & 0x3F];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = dictionary[(((data[i+1] << 2) & 0x3C) | ((data[i+2] >> 6) & 0x03)) & 0x03F];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = dictionary[data[i+2] & 0x3F];
|
||||
CHECK_LINE_END_STRING();
|
||||
i += 3;
|
||||
}
|
||||
|
||||
if(dataLength == 2) {
|
||||
buffer[pos++] = dictionary[(data[i] >> 2) & 0x3F];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = dictionary[(((data[i] << 4) & 0x30) | ((data[i+1] >> 4) & 0x0F)) & 0x3F];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = dictionary[(data[i+1] << 2) & 0x3C];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = '=';
|
||||
} else if(dataLength == 1) {
|
||||
buffer[pos++] = dictionary[(data[i] >> 2) & 0x3F];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = dictionary[(data[i] << 4) & 0x30];
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = '=';
|
||||
CHECK_LINE_END_STRING();
|
||||
buffer[pos++] = '=';
|
||||
}
|
||||
|
||||
CHECK_BUFFER_SIZE();
|
||||
buffer[pos++] = '\n';
|
||||
|
||||
CHECK_BUFFER_SIZE();
|
||||
buffer[pos++] = '\0';
|
||||
|
||||
return (char*) buffer;
|
||||
}
|
@ -0,0 +1,12 @@
|
||||
link_directories(${PROJECT_BINARY_DIR}/common ${PROJECT_BINARY_DIR}/hfs)
|
||||
|
||||
#set(COREFOUNDATION_LIBRARY CoreFoundation)
|
||||
|
||||
IF (APPLE)
|
||||
FIND_LIBRARY(COREFOUNDATION_LIBRARY CoreFoundation)
|
||||
ENDIF (APPLE)
|
||||
|
||||
add_executable(emf_decrypter emf_decrypter.c emf_init.c)
|
||||
target_link_libraries (emf_decrypter hfs common crypto ${COREFOUNDATION_LIBRARY})
|
||||
|
||||
install(TARGETS emf_decrypter DESTINATION .)
|
42
dump-imessages/iphone-dataprotection/emf_decrypter/emf/emf.h
Normal file
42
dump-imessages/iphone-dataprotection/emf_decrypter/emf/emf.h
Normal file
@ -0,0 +1,42 @@
|
||||
//As of iOS 4, class keys 1 to 4 are used for files; class 5 usage is unknown
|
||||
#define MAX_CLASS_KEYS 5
|
||||
#define CLASS_DKEY 4
|
||||
|
||||
typedef struct EMFInfo
|
||||
{
|
||||
Volume* volume;
|
||||
uint64_t volume_id;
|
||||
uint64_t volume_offset;
|
||||
uint32_t classKeys_bitset;
|
||||
AES_KEY emfkey;
|
||||
AES_KEY classKeys[MAX_CLASS_KEYS];
|
||||
}EMFInfo;
|
||||
|
||||
EMFInfo* EMF_init(Volume*, char*);
|
||||
|
||||
#define CPROTECT_V2_LENGTH 0x38 //56
|
||||
#define CP_WRAPPEDKEYSIZE 40 /* 40 = 32-byte file key + 8-byte key-wrap IV */
|
||||
|
||||
//http://www.opensource.apple.com/source/xnu/xnu-1699.22.73/bsd/sys/cprotect.h
|
||||
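//cprotect_xattr_v2 mirrors the layout of the iOS 4 "com.apple.system.cprotect"
//extended attribute; persistent_key is the 40-byte wrapped per-file key that
//EMF_unwrap_filekey_forclass() unwraps with the class key.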
typedef struct cprotect_xattr_v2
|
||||
{
|
||||
uint16_t xattr_major_version; // =2
|
||||
uint16_t xattr_minor_version; // =0
|
||||
uint32_t flags; // leaks stack dword in one code path (cp_handle_vnop)
|
||||
uint32_t persistent_class;
|
||||
uint32_t key_size; //0x28
|
||||
uint8_t persistent_key[0x28];
|
||||
} cprotect_xattr_v2;
|
||||
|
||||
#define CPROTECT_V4_LENGTH 0x4C //76
|
||||
|
||||
typedef struct cprotect_xattr_v4
|
||||
{
|
||||
uint16_t xattr_major_version; // =4
|
||||
uint16_t xattr_minor_version; // =0
|
||||
uint32_t xxx_length; // 0xc
|
||||
uint32_t protection_class_id;
|
||||
uint32_t wrapped_length; //0x28
|
||||
uint8_t xxx_junk[20]; //uninitialized ?
|
||||
uint8_t wrapped_key[0x28];
|
||||
} cprotect_xattr_v4;
|
@ -0,0 +1,217 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
#include <inttypes.h>
|
||||
#include <libgen.h>
|
||||
#include <openssl/aes.h>
|
||||
#include <hfs/hfslib.h>
|
||||
#include "emf.h"
|
||||
|
||||
char endianness;
|
||||
|
||||
void TestByteOrder()
|
||||
{
|
||||
short int word = 0x0001;
|
||||
char *byte = (char *) &word;
|
||||
endianness = byte[0] ? IS_LITTLE_ENDIAN : IS_BIG_ENDIAN;
|
||||
}
|
||||
|
||||
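//Derives the 16-byte CBC IV for a logical block: the LBA is stepped through a
//32-bit LFSR (taps 0x80000061) and each of the four successive states becomes
//one 32-bit word of the IV.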
void iv_for_lba(uint32_t lba, uint32_t* iv)
|
||||
{
|
||||
int i;
|
||||
for(i = 0; i < 4; i++)
|
||||
{
|
||||
if(lba & 1)
|
||||
lba = 0x80000061 ^ (lba >> 1);
|
||||
else
|
||||
lba = lba >> 1;
|
||||
iv[i] = lba;
|
||||
}
|
||||
}
|
||||
|
||||
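//Per-file keys are stored AES-key-wrapped (RFC 3394, 40 bytes) under the
//protection class key; a successful unwrap yields the 32-byte AES-256 file key,
//loaded here as a decryption key.  protection_class_id is 1-based, indexing
//classKeys[class-1].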
int EMF_unwrap_filekey_forclass(EMFInfo* emf, uint8_t* wrapped_file_key, uint32_t protection_class_id, AES_KEY* file_key)
|
||||
{
|
||||
uint8_t fk[32]={0};
|
||||
|
||||
if (protection_class_id < 1 || protection_class_id >= MAX_CLASS_KEYS)
|
||||
return -1;
|
||||
|
||||
if ((emf->classKeys_bitset & (1 << protection_class_id)) == 0)
|
||||
{
|
||||
printf("Class key %d not available\n", protection_class_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(AES_unwrap_key(&(emf->classKeys[protection_class_id-1]), NULL, fk, wrapped_file_key, 40)!= 32)
|
||||
{
|
||||
fprintf(stderr, "EMF_unwrap_filekey_forclass unwrap FAIL, protection_class_id=%d\n", protection_class_id);
|
||||
return -1;
|
||||
}
|
||||
AES_set_decrypt_key(fk, 32*8, file_key);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
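//The image is assumed to have been dumped through the device's AES hardware,
//which transparently strips the EMF (whole-partition) encryption layer.  For
//protected files that leaves EMF-decrypted file-key ciphertext on disk, so each
//block is first re-encrypted with the EMF key to restore the real ciphertext,
//then decrypted with the unwrapped per-file key, using the same LBA-derived IV
//for both passes.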
void EMF_fix_and_decrypt_block(EMFInfo* emf, uint8_t* buffer, uint32_t lba, uint32_t blockSize, AES_KEY* filekey)
|
||||
{
|
||||
uint32_t volumeOffset = emf->volume_offset;
|
||||
uint32_t iv[4];
|
||||
|
||||
//reencrypt with emf key to get correct ciphertext
|
||||
iv_for_lba(volumeOffset + lba, iv);
|
||||
AES_cbc_encrypt(buffer, buffer, blockSize, &(emf->emfkey), (uint8_t*) iv, AES_ENCRYPT);
|
||||
|
||||
//decrypt with file key
|
||||
iv_for_lba(volumeOffset + lba, iv);
|
||||
AES_cbc_encrypt(buffer, buffer, blockSize, filekey, (uint8_t*) iv, AES_DECRYPT);
|
||||
}
|
||||
|
||||
int EMF_decrypt_file_blocks(EMFInfo* emf, HFSPlusCatalogFile* file, uint8_t* wrapped_file_key, uint32_t protection_class)
|
||||
{
|
||||
AES_KEY filekey;
|
||||
|
||||
if( EMF_unwrap_filekey_forclass(emf, wrapped_file_key, protection_class, &filekey))
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
io_func* io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, emf->volume);
|
||||
if(io == NULL)
|
||||
{
|
||||
fprintf(stderr, "openRawFile %d FAIL!\n", file->fileID);
|
||||
return -1;
|
||||
}
|
||||
RawFile* rawFile = (RawFile*) io->data;
|
||||
Extent* extent = rawFile->extents;
|
||||
uint32_t blockSize = emf->volume->volumeHeader->blockSize;
|
||||
uint32_t i;
|
||||
uint8_t* buffer = malloc(blockSize);
|
||||
|
||||
if(buffer == NULL)
|
||||
return -1;
|
||||
|
||||
//decrypt all blocks in all extents
|
||||
//the last block may still contain leftover data from erased files
|
||||
while( extent != NULL)
|
||||
{
|
||||
for(i=0; i < extent->blockCount; i++)
|
||||
{
|
||||
if(READ(emf->volume->image, (extent->startBlock + i) * blockSize, blockSize, buffer))
|
||||
{
|
||||
EMF_fix_and_decrypt_block(emf, buffer, extent->startBlock + i, blockSize, &filekey);
|
||||
|
||||
//write back to image
|
||||
WRITE(emf->volume->image, (extent->startBlock + i) * blockSize, blockSize, buffer);
|
||||
}
|
||||
}
|
||||
extent = extent->next;
|
||||
}
|
||||
|
||||
free(buffer);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int EMF_decrypt_folder(EMFInfo* emf, HFSCatalogNodeID folderID)
|
||||
{
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* theList;
|
||||
HFSPlusCatalogFolder* folder;
|
||||
HFSPlusCatalogFile* file;
|
||||
char* name;
|
||||
cprotect_xattr_v2* cprotect_xattr;
|
||||
uint8_t* wrapped_file_key;
|
||||
|
||||
theList = list = getFolderContents(folderID, emf->volume);
|
||||
|
||||
while(list != NULL)
|
||||
{
|
||||
name = unicodeToAscii(&list->name);
|
||||
|
||||
if(list->record->recordType == kHFSPlusFolderRecord)
|
||||
{
|
||||
folder = (HFSPlusCatalogFolder*)list->record;
|
||||
EMF_decrypt_folder(emf, folder->folderID);
|
||||
}
|
||||
else if(list->record->recordType == kHFSPlusFileRecord)
|
||||
{
|
||||
file = (HFSPlusCatalogFile*)list->record;
|
||||
|
||||
size_t attr_len = getAttribute(emf->volume, file->fileID, "com.apple.system.cprotect", (uint8_t**) &cprotect_xattr);
|
||||
|
||||
if(cprotect_xattr != NULL && attr_len > 0)
|
||||
{
|
||||
if (cprotect_xattr->xattr_major_version == 2 && attr_len == CPROTECT_V2_LENGTH)
|
||||
{
|
||||
printf("Decrypting %s\n", name);
|
||||
if(!EMF_decrypt_file_blocks(emf, file, cprotect_xattr->persistent_key, cprotect_xattr->persistent_class))
|
||||
{
|
||||
//TODO HAX: update cprotect xattr version field (bit1) to mark file as decrypted ?
|
||||
//cprotect_xattr->version |= 1;
|
||||
//setAttribute(volume, file->fileID, "com.apple.system.cprotect", (uint8_t*) cprotect_xattr, CPROTECT_V2_LENGTH);
|
||||
}
|
||||
}
|
||||
else if (cprotect_xattr->xattr_major_version == 4 && attr_len == CPROTECT_V4_LENGTH)
|
||||
{
|
||||
//not just yet :)
|
||||
}
|
||||
else if (cprotect_xattr->xattr_major_version & 1)
|
||||
{
|
||||
//TODO: file already decrypted by this tool ?
|
||||
}
|
||||
else
|
||||
{
|
||||
fprintf(stderr, "Unknown cprotect xattr version/length : %x/%zx\n", cprotect_xattr->xattr_major_version, attr_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(name);
|
||||
list = list->next;
|
||||
}
|
||||
releaseCatalogRecordList(theList);
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, const char *argv[]) {
|
||||
io_func* io;
|
||||
Volume* volume;
|
||||
|
||||
TestByteOrder();
|
||||
|
||||
if(argc < 2) {
|
||||
printf("usage: %s <image-file>\n", argv[0]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
io = openFlatFile(argv[1]);
|
||||
|
||||
if(io == NULL) {
|
||||
fprintf(stderr, "error: Cannot open image-file.\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
volume = openVolume(io);
|
||||
if(volume == NULL) {
|
||||
fprintf(stderr, "error: Cannot open volume.\n");
|
||||
CLOSE(io);
|
||||
return 1;
|
||||
}
|
||||
printf("WARNING ! This tool will modify the hfs image and possibly wreck it if something goes wrong !\n"
|
||||
"Make sure to backup the image before proceeding\n");
|
||||
printf("Press a key to continue or CTRL-C to abort\n");
|
||||
getchar();
|
||||
|
||||
char* dir = dirname((char*)argv[1]);
|
||||
EMFInfo* emf = EMF_init(volume, dir);
|
||||
|
||||
if(emf != NULL)
|
||||
{
|
||||
EMF_decrypt_folder(emf, kHFSRootFolderID);
|
||||
}
|
||||
|
||||
closeVolume(volume);
|
||||
CLOSE(io);
|
||||
|
||||
return 0;
|
||||
}
|
@ -0,0 +1,146 @@
|
||||
#include <stdio.h>
|
||||
#include <CoreFoundation/CoreFoundation.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
#include <openssl/aes.h>
|
||||
#include "hfs/hfslib.h"
|
||||
#include "emf.h"
|
||||
|
||||
size_t ConvertHexaCFString(CFStringRef s, uint8_t** bytes)
|
||||
{
|
||||
uint32_t len = CFStringGetLength(s);
|
||||
uint8_t* hex = malloc(len+1);
|
||||
|
||||
if(hex == NULL)
|
||||
return 0;
|
||||
|
||||
if(!CFStringGetCString(s, hex, len+1, kCFStringEncodingASCII))
|
||||
{
|
||||
free(hex);
|
||||
return 0;
|
||||
}
|
||||
size_t size = 0;
|
||||
hexToBytes(hex, bytes, &size);
|
||||
free(hex);
|
||||
return size;
|
||||
}
|
||||
|
||||
void grabClassKey(const void *key, const void *value, void *context)
|
||||
{
|
||||
EMFInfo* emf = (EMFInfo*) context;
|
||||
uint8_t* class_key = NULL;
|
||||
|
||||
if(CFGetTypeID(key) != CFStringGetTypeID() || CFGetTypeID(value) != CFStringGetTypeID())
|
||||
return;
|
||||
|
||||
SInt32 clas = CFStringGetIntValue((CFStringRef)key);
|
||||
|
||||
if(clas > 0 && clas <= MAX_CLASS_KEYS && CFStringGetLength((CFStringRef) value) == 64)
|
||||
{
|
||||
if(ConvertHexaCFString(value, &class_key) == 32)
|
||||
{
|
||||
AES_set_decrypt_key(class_key, 32*8, &(emf->classKeys[clas-1]));
|
||||
free(class_key);
|
||||
emf->classKeys_bitset |= 1 << clas;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
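//EMF_init loads <volume_id>.plist from imagePath (the directory containing the
//image).  Required entries: "EMF" and "DKey", 64 hex characters each, and the
//number "dataVolumeOffset".  The optional "classKeys" dictionary maps class
//numbers ("1".."5") to 64-hex-character class keys.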
EMFInfo* EMF_init(Volume* volume, char* imagePath)
|
||||
{
|
||||
uint8_t* emfk = NULL;
|
||||
uint8_t* dkey = NULL;
|
||||
|
||||
uint64_t volume_id = *((uint64_t*) (&volume->volumeHeader->finderInfo[6]));
|
||||
FLIPENDIAN(volume_id);
|
||||
|
||||
if(imagePath == NULL)
|
||||
imagePath = ".";
|
||||
|
||||
printf("Volume identifier : %llx\n", volume_id);
|
||||
printf("Searching for %s/%llx.plist\n", imagePath, volume_id);
|
||||
|
||||
CFStringRef path = CFStringCreateWithFormat(NULL, NULL, CFSTR("%s/%llx.plist"), imagePath, volume_id);
|
||||
|
||||
CFURLRef fileURL = CFURLCreateWithFileSystemPath(NULL, path, kCFURLPOSIXPathStyle, FALSE);
|
||||
CFRelease(path);
|
||||
|
||||
CFReadStreamRef stream = CFReadStreamCreateWithFile(NULL, fileURL);
|
||||
CFRelease(fileURL);
|
||||
|
||||
if(stream == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
if(!CFReadStreamOpen(stream))
|
||||
{
|
||||
fprintf(stderr, "Cannot open file\n");
|
||||
return NULL;
|
||||
}
|
||||
CFPropertyListRef dict = CFPropertyListCreateWithStream(NULL, stream, 0, kCFPropertyListImmutable, NULL, NULL);
|
||||
|
||||
CFRelease(stream);
|
||||
|
||||
if (dict == NULL || CFGetTypeID(dict) != CFDictionaryGetTypeID())
|
||||
return NULL;
|
||||
|
||||
CFStringRef emfHex = CFDictionaryGetValue(dict, CFSTR("EMF"));
|
||||
CFStringRef dkeyHex = CFDictionaryGetValue(dict, CFSTR("DKey"));
|
||||
CFNumberRef dataVolumeOffset = CFDictionaryGetValue (dict, CFSTR("dataVolumeOffset"));
|
||||
|
||||
if (emfHex == NULL || CFGetTypeID(emfHex) != CFStringGetTypeID())
|
||||
return NULL;
|
||||
if (dkeyHex == NULL || CFGetTypeID(dkeyHex) != CFStringGetTypeID())
|
||||
return NULL;
|
||||
if (dataVolumeOffset == NULL || CFGetTypeID(dataVolumeOffset) != CFNumberGetTypeID())
|
||||
return NULL;
|
||||
|
||||
EMFInfo* emf = malloc(sizeof(EMFInfo));
|
||||
|
||||
if(emf == NULL)
|
||||
return NULL;
|
||||
|
||||
memset(emf, 0, sizeof(EMFInfo));
|
||||
|
||||
emf->volume = volume;
|
||||
|
||||
CFNumberGetValue(dataVolumeOffset, kCFNumberSInt64Type, &emf->volume_offset);
|
||||
|
||||
printf("Data partition offset = %llx\n", emf->volume_offset);
|
||||
|
||||
if(ConvertHexaCFString(emfHex, &emfk) != 32)
|
||||
{
|
||||
fprintf(stderr, "Invalid EMF key\n");
|
||||
free(emf);
|
||||
return NULL;
|
||||
}
|
||||
if(ConvertHexaCFString(dkeyHex, &dkey) != 32)
|
||||
{
|
||||
fprintf(stderr, "Invalid DKey key\n");
|
||||
free(emf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
AES_set_encrypt_key(emfk, 32*8, &(emf->emfkey));
|
||||
AES_set_decrypt_key(dkey, 32*8, &(emf->classKeys[CLASS_DKEY-1]));
|
||||
emf->classKeys_bitset |= 1 << CLASS_DKEY;
|
||||
|
||||
CFDictionaryRef classKeys = CFDictionaryGetValue(dict, CFSTR("classKeys"));
|
||||
|
||||
if(classKeys != NULL && CFGetTypeID(classKeys) == CFDictionaryGetTypeID())
|
||||
{
|
||||
printf("Reading class keys, NSProtectionComplete files should be decrypted OK\n");
|
||||
CFDictionaryApplyFunction(classKeys, grabClassKey, (void*) emf);
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Only NSProtectionNone files will be decrypted\n");
|
||||
}
|
||||
|
||||
free(emfk);
|
||||
free(dkey);
|
||||
return emf;
|
||||
}
|
@ -0,0 +1,18 @@
|
||||
INCLUDE(FindZLIB)
|
||||
|
||||
IF(NOT ZLIB_FOUND)
|
||||
message(FATAL_ERROR "zlib is required for hfs!")
|
||||
ENDIF(NOT ZLIB_FOUND)
|
||||
|
||||
include_directories(${ZLIB_INCLUDE_DIR})
|
||||
link_directories(${ZLIB_LIBRARIES})
|
||||
|
||||
link_directories (${PROJECT_BINARY_DIR}/common)
|
||||
add_library(hfs btree.c catalog.c extents.c xattr.c fastunicodecompare.c flatfile.c hfslib.c rawfile.c utility.c volume.c hfscompress.c)
|
||||
target_link_libraries(hfs common z)
|
||||
|
||||
add_executable(hfsplus hfs.c)
|
||||
target_link_libraries (hfsplus hfs)
|
||||
|
||||
install(TARGETS hfsplus DESTINATION .)
|
||||
|
1556
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/btree.c
Normal file
1556
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/btree.c
Normal file
File diff suppressed because it is too large
1165
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/catalog.c
Normal file
1165
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/catalog.c
Normal file
File diff suppressed because it is too large
119
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/extents.c
Normal file
119
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/extents.c
Normal file
@ -0,0 +1,119 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
static inline void flipExtentDescriptor(HFSPlusExtentDescriptor* extentDescriptor) {
|
||||
FLIPENDIAN(extentDescriptor->startBlock);
|
||||
FLIPENDIAN(extentDescriptor->blockCount);
|
||||
}
|
||||
|
||||
void flipExtentRecord(HFSPlusExtentRecord* extentRecord) {
|
||||
HFSPlusExtentDescriptor *extentDescriptor;
|
||||
extentDescriptor = (HFSPlusExtentDescriptor*)extentRecord;
|
||||
|
||||
flipExtentDescriptor(&extentDescriptor[0]);
|
||||
flipExtentDescriptor(&extentDescriptor[1]);
|
||||
flipExtentDescriptor(&extentDescriptor[2]);
|
||||
flipExtentDescriptor(&extentDescriptor[3]);
|
||||
flipExtentDescriptor(&extentDescriptor[4]);
|
||||
flipExtentDescriptor(&extentDescriptor[5]);
|
||||
flipExtentDescriptor(&extentDescriptor[6]);
|
||||
flipExtentDescriptor(&extentDescriptor[7]);
|
||||
}
|
||||
|
||||
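/* B-tree key ordering for the extents overflow file: compare by forkType, then
   fileID, then startBlock, with keyLength as a final tie-breaker. */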
static int extentCompare(BTKey* vLeft, BTKey* vRight) {
|
||||
HFSPlusExtentKey* left;
|
||||
HFSPlusExtentKey* right;
|
||||
|
||||
left = (HFSPlusExtentKey*) vLeft;
|
||||
right =(HFSPlusExtentKey*) vRight;
|
||||
|
||||
if(left->forkType < right->forkType) {
|
||||
return -1;
|
||||
} else if(left->forkType > right->forkType) {
|
||||
return 1;
|
||||
} else {
|
||||
if(left->fileID < right->fileID) {
|
||||
return -1;
|
||||
} else if(left->fileID > right->fileID) {
|
||||
return 1;
|
||||
} else {
|
||||
if(left->startBlock < right->startBlock) {
|
||||
return -1;
|
||||
} else if(left->startBlock > right->startBlock) {
|
||||
return 1;
|
||||
} else {
|
||||
/* do a safety check on key length. Otherwise, bad things may happen later on when we try to add or remove with this key */
|
||||
if(left->keyLength == right->keyLength) {
|
||||
return 0;
|
||||
} else if(left->keyLength < right->keyLength) {
|
||||
return -1;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static BTKey* extentKeyRead(off_t offset, io_func* io) {
|
||||
HFSPlusExtentKey* key;
|
||||
|
||||
key = (HFSPlusExtentKey*) malloc(sizeof(HFSPlusExtentKey));
|
||||
|
||||
if(!READ(io, offset, sizeof(HFSPlusExtentKey), key))
|
||||
return NULL;
|
||||
|
||||
FLIPENDIAN(key->keyLength);
|
||||
FLIPENDIAN(key->forkType);
|
||||
FLIPENDIAN(key->fileID);
|
||||
FLIPENDIAN(key->startBlock);
|
||||
|
||||
return (BTKey*)key;
|
||||
}
|
||||
|
||||
static int extentKeyWrite(off_t offset, BTKey* toWrite, io_func* io) {
|
||||
HFSPlusExtentKey* key;
|
||||
|
||||
key = (HFSPlusExtentKey*) malloc(sizeof(HFSPlusExtentKey));
|
||||
|
||||
memcpy(key, toWrite, sizeof(HFSPlusExtentKey));
|
||||
|
||||
FLIPENDIAN(key->keyLength);
|
||||
FLIPENDIAN(key->forkType);
|
||||
FLIPENDIAN(key->fileID);
|
||||
FLIPENDIAN(key->startBlock);
|
||||
|
||||
if(!WRITE(io, offset, sizeof(HFSPlusExtentKey), key))
|
||||
return FALSE;
|
||||
|
||||
free(key);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void extentKeyPrint(BTKey* toPrint) {
|
||||
HFSPlusExtentKey* key;
|
||||
|
||||
key = (HFSPlusExtentKey*)toPrint;
|
||||
|
||||
printf("extent%d:%d:%d", key->forkType, key->fileID, key->startBlock);
|
||||
}
|
||||
|
||||
static BTKey* extentDataRead(off_t offset, io_func* io) {
|
||||
HFSPlusExtentRecord* record;
|
||||
|
||||
record = (HFSPlusExtentRecord*) malloc(sizeof(HFSPlusExtentRecord));
|
||||
|
||||
if(!READ(io, offset, sizeof(HFSPlusExtentRecord), record))
|
||||
return NULL;
|
||||
|
||||
flipExtentRecord(record);
|
||||
|
||||
return (BTKey*)record;
|
||||
}
|
||||
|
||||
BTree* openExtentsTree(io_func* file) {
|
||||
return openBTree(file, &extentCompare, &extentKeyRead, &extentKeyWrite, &extentKeyPrint, &extentDataRead);
|
||||
}
|
@ -0,0 +1,418 @@
|
||||
#include <stdint.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
/* This routine is taken from Apple's TN 1150, with adaptations for C */
|
||||
|
||||
/* The lower case table consists of a 256-entry high-byte table followed by
|
||||
some number of 256-entry subtables. The high-byte table contains either an
|
||||
offset to the subtable for characters with that high byte or zero, which
|
||||
means that there are no case mappings or ignored characters in that block.
|
||||
Ignored characters are mapped to zero.
|
||||
*/
|
||||
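/* Worked example against the table below: to lower-case U+0041 ('A'), the
   high-byte table entry for 0x00 is 0x0100, so its subtable starts at index
   0x0100; gLowerCaseTable[0x0100 + 0x41] == 0x0061, i.e. 'a'.  A high-byte
   entry of 0x0000 means no character in that 256-character block is
   case-mapped or ignored. */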
|
||||
uint16_t gLowerCaseTable[] = {
|
||||
|
||||
/* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00,
|
||||
|
||||
/* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
|
||||
0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
|
||||
/* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
|
||||
0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
|
||||
/* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
|
||||
0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
|
||||
/* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
|
||||
0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
|
||||
/* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
|
||||
0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
|
||||
/* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
|
||||
0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
|
||||
/* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
|
||||
0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
|
||||
/* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
|
||||
0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
|
||||
/* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
|
||||
0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
|
||||
/* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
|
||||
0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
|
||||
/* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
|
||||
0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
|
||||
/* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
|
||||
0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
|
||||
/* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7,
|
||||
0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
|
||||
/* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
|
||||
0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF,
|
||||
/* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
|
||||
0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
|
||||
/* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
|
||||
0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF,
|
||||
|
||||
/* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107,
|
||||
0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F,
|
||||
/* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117,
|
||||
0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F,
|
||||
/* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127,
|
||||
0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F,
|
||||
/* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137,
|
||||
0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140,
|
||||
/* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147,
|
||||
0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F,
|
||||
/* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157,
|
||||
0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F,
|
||||
/* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167,
|
||||
0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F,
|
||||
/* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177,
|
||||
0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F,
|
||||
/* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188,
|
||||
0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259,
|
||||
/* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268,
|
||||
0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275,
|
||||
/* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8,
|
||||
0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF,
|
||||
/* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292,
|
||||
0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF,
|
||||
/* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9,
|
||||
0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF,
|
||||
/* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7,
|
||||
0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF,
|
||||
/* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7,
|
||||
0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF,
|
||||
/* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7,
|
||||
0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF,
|
||||
|
||||
/* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
|
||||
0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
|
||||
/* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
|
||||
0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
|
||||
/* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
|
||||
0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
|
||||
/* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
|
||||
0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
|
||||
/* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347,
|
||||
0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
|
||||
/* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
|
||||
0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
|
||||
/* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
|
||||
0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
|
||||
/* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
|
||||
0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
|
||||
/* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387,
|
||||
0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F,
|
||||
/* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
|
||||
0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
|
||||
/* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
|
||||
0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF,
|
||||
/* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
|
||||
0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
|
||||
/* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
|
||||
0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF,
|
||||
/* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7,
|
||||
0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF,
|
||||
/* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7,
|
||||
0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF,
|
||||
/* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7,
|
||||
0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF,
|
||||
|
||||
/* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407,
|
||||
0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F,
|
||||
/* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
|
||||
0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
|
||||
/* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
|
||||
0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
|
||||
/* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
|
||||
0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
|
||||
/* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
|
||||
0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
|
||||
/* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457,
|
||||
0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F,
|
||||
/* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467,
|
||||
0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F,
|
||||
/* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477,
|
||||
0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F,
|
||||
/* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
|
||||
0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F,
|
||||
/* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497,
|
||||
0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F,
|
||||
/* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7,
|
||||
0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF,
|
||||
/* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7,
|
||||
0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF,
|
||||
/* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8,
|
||||
0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF,
|
||||
/* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7,
|
||||
0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF,
|
||||
/* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7,
|
||||
0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF,
|
||||
/* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7,
|
||||
0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF,
|
||||
|
||||
/* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507,
|
||||
0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
|
||||
/* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517,
|
||||
0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
|
||||
/* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
|
||||
0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
|
||||
/* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
|
||||
0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
|
||||
/* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
|
||||
0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
|
||||
/* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557,
|
||||
0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
|
||||
/* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
|
||||
0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
|
||||
/* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
|
||||
0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
|
||||
/* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587,
|
||||
0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
|
||||
/* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597,
|
||||
0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
|
||||
/* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7,
|
||||
0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
|
||||
/* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7,
|
||||
0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
|
||||
/* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7,
|
||||
0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
|
||||
/* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7,
|
||||
0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
|
||||
/* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7,
|
||||
0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
|
||||
/* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7,
|
||||
0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF,
|
||||
|
||||
/* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
|
||||
0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F,
|
||||
/* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017,
|
||||
0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F,
|
||||
/* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
|
||||
0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F,
|
||||
/* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037,
|
||||
0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F,
|
||||
/* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047,
|
||||
0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F,
|
||||
/* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057,
|
||||
0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F,
|
||||
/* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067,
|
||||
0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F,
|
||||
/* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077,
|
||||
0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F,
|
||||
/* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087,
|
||||
0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F,
|
||||
/* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097,
|
||||
0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F,
|
||||
/* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
|
||||
0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
|
||||
/* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
|
||||
0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
|
||||
/* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7,
|
||||
0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF,
|
||||
/* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
|
||||
0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
|
||||
/* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
|
||||
0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
|
||||
/* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7,
|
||||
0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF,
|
||||
|
||||
/* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
|
||||
0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017,
|
||||
0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F,
|
||||
/* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027,
|
||||
0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F,
|
||||
/* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037,
|
||||
0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F,
|
||||
/* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047,
|
||||
0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F,
|
||||
/* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057,
|
||||
0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F,
|
||||
/* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067,
|
||||
0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
|
||||
/* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077,
|
||||
0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F,
|
||||
/* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
|
||||
0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F,
|
||||
/* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097,
|
||||
0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F,
|
||||
/* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7,
|
||||
0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF,
|
||||
/* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7,
|
||||
0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF,
|
||||
/* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7,
|
||||
0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF,
|
||||
/* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7,
|
||||
0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF,
|
||||
/* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7,
|
||||
0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF,
|
||||
/* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7,
|
||||
0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF,
|
||||
|
||||
/* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107,
|
||||
0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
|
||||
/* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117,
|
||||
0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
|
||||
/* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
|
||||
0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
|
||||
/* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137,
|
||||
0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
|
||||
/* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147,
|
||||
0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
|
||||
    /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157,
            0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
    /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
            0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
    /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
            0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
    /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187,
            0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
    /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
            0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
    /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7,
            0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
    /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7,
            0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
    /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7,
            0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
    /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7,
            0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
    /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7,
            0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
    /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7,
            0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF,

    /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07,
            0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F,
    /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17,
            0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F,
    /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27,
            0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F,
    /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37,
            0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F,
    /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47,
            0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F,
    /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57,
            0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F,
    /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67,
            0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F,
    /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77,
            0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F,
    /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87,
            0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F,
    /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97,
            0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F,
    /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7,
            0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF,
    /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7,
            0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF,
    /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7,
            0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF,
    /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7,
            0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF,
    /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7,
            0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF,
    /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7,
            0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000,

    /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07,
            0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
    /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17,
            0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
    /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
            0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
    /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
            0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
    /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
            0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
    /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
            0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
    /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67,
            0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
    /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77,
            0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
    /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87,
            0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
    /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97,
            0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
    /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7,
            0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
    /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7,
            0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
    /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7,
            0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
    /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7,
            0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
    /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7,
            0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
    /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7,
            0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF,
};

/*
 * Case-insensitive comparison of two HFS+ Unicode strings, as used for
 * ordering catalog keys. Each UTF-16 code unit is folded through the
 * two-level gLowerCaseTable above: the high byte selects a sub-table
 * offset (0 means the whole row maps to itself), and code units that
 * fold to 0 are ignorable and skipped. ':' is compared as '/'.
 * Returns -1, 0 or 1.
 */
int32_t FastUnicodeCompare ( register uint16_t str1[], register uint16_t length1,
                             register uint16_t str2[], register uint16_t length2)
{
	register uint16_t c1,c2;
	register uint16_t temp;
	register uint16_t* lowerCaseTable;

	lowerCaseTable = gLowerCaseTable;

	while (1) {
		/* default to 0 in case one string runs out of code units */
		c1 = 0;
		c2 = 0;

		/* find next non-ignorable, case-folded code unit from str1 */
		while (length1 && c1 == 0) {
			c1 = *(str1++);
			--length1;
			if ((temp = lowerCaseTable[c1>>8]) != 0)
				c1 = lowerCaseTable[temp + (c1 & 0x00FF)];
		}

		/* find next non-ignorable, case-folded code unit from str2 */
		while (length2 && c2 == 0) {
			c2 = *(str2++);
			--length2;
			if ((temp = lowerCaseTable[c2>>8]) != 0)
				c2 = lowerCaseTable[temp + (c2 & 0x00FF)];
		}

		if (c1 == ':') {
			c1 = '/';
		}
		if (c2 == ':') {
			c2 = '/';
		}

		if (c1 != c2)   /* found a difference */
			break;
		if (c1 == 0)    /* both strings ended at the same point: equal */
			return 0;
	}

	if (c1 < c2)
		return -1;
	else
		return 1;
}
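As a quick illustration of the folding behaviour, a minimal standalone sketch (not part of the original tree): when linked against this file, ASCII names that differ only in case compare equal. The prototype is repeated locally only to keep the sketch self-contained.

#include <stdint.h>
#include <stdio.h>

/* Defined above in this file; re-declared here for the example. */
int32_t FastUnicodeCompare(uint16_t str1[], uint16_t length1,
                           uint16_t str2[], uint16_t length2);

int main(void) {
	/* HFS+ names are UTF-16 code units; plain ASCII fits in one unit each. */
	uint16_t a[] = { 'R', 'e', 'a', 'd', 'm', 'e' };
	uint16_t b[] = { 'R', 'E', 'A', 'D', 'M', 'E' };
	printf("%d\n", FastUnicodeCompare(a, 6, b, 6));  /* prints 0: equal after case folding */
	return 0;
}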
@ -0,0 +1,104 @@
/* io_func backed by an ordinary file on disk: thin read/write/close wrappers
 * around stdio, used to open a raw filesystem image. */
#include <stdlib.h>
#include <hfs/hfsplus.h>

static int flatFileRead(io_func* io, off_t location, size_t size, void *buffer) {
	FILE* file;
	file = (FILE*) io->data;

	if(size == 0) {
		return TRUE;
	}

	//printf("%d %d\n", location, size); fflush(stdout);

	if(fseeko(file, location, SEEK_SET) != 0) {
		perror("fseek");
		return FALSE;
	}

	if(fread(buffer, size, 1, file) != 1) {
		perror("fread");
		return FALSE;
	} else {
		return TRUE;
	}
}

static int flatFileWrite(io_func* io, off_t location, size_t size, void *buffer) {
	FILE* file;

	/*int i;

	printf("write: %lld %d - ", location, size); fflush(stdout);

	for(i = 0; i < size; i++) {
		printf("%x ", ((unsigned char*)buffer)[i]);
		fflush(stdout);
	}
	printf("\n"); fflush(stdout);*/

	if(size == 0) {
		return TRUE;
	}

	file = (FILE*) io->data;

	if(fseeko(file, location, SEEK_SET) != 0) {
		perror("fseek");
		return FALSE;
	}

	if(fwrite(buffer, size, 1, file) != 1) {
		perror("fwrite");
		return FALSE;
	} else {
		return TRUE;
	}
}

static void closeFlatFile(io_func* io) {
	FILE* file;

	file = (FILE*) io->data;

	fclose(file);
	free(io);
}

/* Open an image read-write. Returns NULL if fopen fails. */
io_func* openFlatFile(const char* fileName) {
	io_func* io;

	io = (io_func*) malloc(sizeof(io_func));
	io->data = fopen(fileName, "rb+");

	if(io->data == NULL) {
		perror("fopen");
		free(io);
		return NULL;
	}

	io->read = &flatFileRead;
	io->write = &flatFileWrite;
	io->close = &closeFlatFile;

	return io;
}

/* Open an image read-only; writes will still be attempted but fail at fwrite. */
io_func* openFlatFileRO(const char* fileName) {
	io_func* io;

	io = (io_func*) malloc(sizeof(io_func));
	io->data = fopen(fileName, "rb");

	if(io->data == NULL) {
		perror("fopen");
		free(io);
		return NULL;
	}

	io->read = &flatFileRead;
	io->write = &flatFileWrite;
	io->close = &closeFlatFile;

	return io;
}
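A short usage sketch for these wrappers, assuming the bundled hfs/hfsplus.h defines io_func with the read/write/close members assigned above; the image path is purely illustrative.

#include <stdio.h>
#include <hfs/hfsplus.h>

io_func* openFlatFileRO(const char* fileName);  /* from this file */

int main(void) {
	unsigned char header[512];
	io_func* io = openFlatFileRO("sms-backup.img");  /* hypothetical image path */
	if(io == NULL)
		return 1;

	/* Read the first 512 bytes of the image through the io_func interface. */
	if(io->read(io, 0, sizeof(header), header))
		printf("first byte: 0x%02x\n", header[0]);

	io->close(io);
	return 0;
}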
333
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/hfs.c
Normal file
@ -0,0 +1,333 @@
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
#include <dirent.h>
|
||||
|
||||
#include <hfs/hfslib.h>
|
||||
#include "abstractfile.h"
|
||||
#include <inttypes.h>
|
||||
|
||||
char endianness;
|
||||
|
||||
|
||||
void cmd_ls(Volume* volume, int argc, const char *argv[]) {
|
||||
if(argc > 1)
|
||||
hfs_ls(volume, argv[1]);
|
||||
else
|
||||
hfs_ls(volume, "/");
|
||||
}
|
||||
|
||||
void cmd_cat(Volume* volume, int argc, const char *argv[]) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
AbstractFile* stdoutFile;
|
||||
|
||||
record = getRecordFromPath(argv[1], volume, NULL, NULL);
|
||||
|
||||
stdoutFile = createAbstractFileFromFile(stdout);
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFileRecord)
|
||||
writeToFile((HFSPlusCatalogFile*)record, stdoutFile, volume);
|
||||
else
|
||||
printf("Not a file\n");
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
|
||||
free(record);
|
||||
free(stdoutFile);
|
||||
}
|
||||
|
||||
void cmd_extract(Volume* volume, int argc, const char *argv[]) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
AbstractFile *outFile;
|
||||
|
||||
if(argc < 3) {
|
||||
printf("Not enough arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
outFile = createAbstractFileFromFile(fopen(argv[2], "wb"));
|
||||
|
||||
	if(outFile == NULL) {
		printf("cannot create file");
		return;
	}
|
||||
|
||||
record = getRecordFromPath(argv[1], volume, NULL, NULL);
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFileRecord)
|
||||
writeToFile((HFSPlusCatalogFile*)record, outFile, volume);
|
||||
else
|
||||
printf("Not a file\n");
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
|
||||
outFile->close(outFile);
|
||||
free(record);
|
||||
}
|
||||
|
||||
void cmd_mv(Volume* volume, int argc, const char *argv[]) {
|
||||
if(argc > 2) {
|
||||
move(argv[1], argv[2], volume);
|
||||
} else {
|
||||
printf("Not enough arguments");
|
||||
}
|
||||
}
|
||||
|
||||
void cmd_symlink(Volume* volume, int argc, const char *argv[]) {
|
||||
if(argc > 2) {
|
||||
makeSymlink(argv[1], argv[2], volume);
|
||||
} else {
|
||||
printf("Not enough arguments");
|
||||
}
|
||||
}
|
||||
|
||||
void cmd_mkdir(Volume* volume, int argc, const char *argv[]) {
|
||||
if(argc > 1) {
|
||||
newFolder(argv[1], volume);
|
||||
} else {
|
||||
printf("Not enough arguments");
|
||||
}
|
||||
}
|
||||
|
||||
void cmd_add(Volume* volume, int argc, const char *argv[]) {
|
||||
AbstractFile *inFile;
|
||||
|
||||
if(argc < 3) {
|
||||
printf("Not enough arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
inFile = createAbstractFileFromFile(fopen(argv[1], "rb"));
|
||||
|
||||
	if(inFile == NULL) {
		printf("file to add not found");
		return;
	}
|
||||
|
||||
add_hfs(volume, inFile, argv[2]);
|
||||
}
|
||||
|
||||
void cmd_rm(Volume* volume, int argc, const char *argv[]) {
|
||||
if(argc > 1) {
|
||||
removeFile(argv[1], volume);
|
||||
} else {
|
||||
printf("Not enough arguments");
|
||||
}
|
||||
}
|
||||
|
||||
void cmd_chmod(Volume* volume, int argc, const char *argv[]) {
|
||||
int mode;
|
||||
|
||||
if(argc > 2) {
|
||||
sscanf(argv[1], "%o", &mode);
|
||||
chmodFile(argv[2], mode, volume);
|
||||
} else {
|
||||
printf("Not enough arguments");
|
||||
}
|
||||
}
|
||||
|
||||
void cmd_extractall(Volume* volume, int argc, const char *argv[]) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
char cwd[1024];
|
||||
char* name;
|
||||
|
||||
ASSERT(getcwd(cwd, 1024) != NULL, "cannot get current working directory");
|
||||
|
||||
if(argc > 1)
|
||||
record = getRecordFromPath(argv[1], volume, &name, NULL);
|
||||
else
|
||||
record = getRecordFromPath("/", volume, &name, NULL);
|
||||
|
||||
if(argc > 2) {
|
||||
ASSERT(chdir(argv[2]) == 0, "chdir");
|
||||
}
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFolderRecord)
|
||||
extractAllInFolder(((HFSPlusCatalogFolder*)record)->folderID, volume);
|
||||
else
|
||||
printf("Not a folder\n");
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
free(record);
|
||||
|
||||
ASSERT(chdir(cwd) == 0, "chdir");
|
||||
}
|
||||
|
||||
|
||||
void cmd_rmall(Volume* volume, int argc, const char *argv[]) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
char* name;
|
||||
char initPath[1024];
|
||||
int lastCharOfPath;
|
||||
|
||||
if(argc > 1) {
|
||||
record = getRecordFromPath(argv[1], volume, &name, NULL);
|
||||
strcpy(initPath, argv[1]);
|
||||
lastCharOfPath = strlen(argv[1]) - 1;
|
||||
if(argv[1][lastCharOfPath] != '/') {
|
||||
initPath[lastCharOfPath + 1] = '/';
|
||||
initPath[lastCharOfPath + 2] = '\0';
|
||||
}
|
||||
} else {
|
||||
record = getRecordFromPath("/", volume, &name, NULL);
|
||||
initPath[0] = '/';
|
||||
initPath[1] = '\0';
|
||||
}
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFolderRecord) {
|
||||
removeAllInFolder(((HFSPlusCatalogFolder*)record)->folderID, volume, initPath);
|
||||
} else {
|
||||
printf("Not a folder\n");
|
||||
}
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
free(record);
|
||||
}
|
||||
|
||||
void cmd_addall(Volume* volume, int argc, const char *argv[]) {
|
||||
if(argc < 2) {
|
||||
printf("Not enough arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
if(argc > 2) {
|
||||
addall_hfs(volume, argv[1], argv[2]);
|
||||
} else {
|
||||
addall_hfs(volume, argv[1], "/");
|
||||
}
|
||||
}
|
||||
|
||||
void cmd_grow(Volume* volume, int argc, const char *argv[]) {
|
||||
uint64_t newSize;
|
||||
|
||||
if(argc < 2) {
|
||||
printf("Not enough arguments\n");
|
||||
return;
|
||||
}
|
||||
|
||||
newSize = 0;
|
||||
sscanf(argv[1], "%" PRId64, &newSize);
|
||||
|
||||
grow_hfs(volume, newSize);
|
||||
|
||||
printf("grew volume: %" PRId64 "\n", newSize);
|
||||
}
|
||||
|
||||
void cmd_getattr(Volume* volume, int argc, const char *argv[]) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
|
||||
if(argc < 3) {
|
||||
printf("Not enough arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
record = getRecordFromPath(argv[1], volume, NULL, NULL);
|
||||
|
||||
if(record != NULL) {
|
||||
HFSCatalogNodeID id;
|
||||
uint8_t* data;
|
||||
size_t size;
|
||||
if(record->recordType == kHFSPlusFileRecord)
|
||||
id = ((HFSPlusCatalogFile*)record)->fileID;
|
||||
else
|
||||
id = ((HFSPlusCatalogFolder*)record)->folderID;
|
||||
|
||||
size = getAttribute(volume, id, argv[2], &data);
|
||||
|
||||
if(size > 0) {
|
||||
fwrite(data, size, 1, stdout);
|
||||
free(data);
|
||||
} else {
|
||||
printf("No such attribute\n");
|
||||
}
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
|
||||
free(record);
|
||||
}
|
||||
|
||||
void TestByteOrder()
|
||||
{
|
||||
short int word = 0x0001;
|
||||
char *byte = (char *) &word;
|
||||
endianness = byte[0] ? IS_LITTLE_ENDIAN : IS_BIG_ENDIAN;
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, const char *argv[]) {
|
||||
io_func* io;
|
||||
Volume* volume;
|
||||
|
||||
TestByteOrder();
|
||||
|
||||
if(argc < 3) {
|
||||
printf("usage: %s <image-file> <ls|cat|mv|mkdir|add|rm|chmod|extract|extractall|rmall|addall|debug> <arguments>\n", argv[0]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
io = openFlatFile(argv[1]);
|
||||
if(io == NULL) {
|
||||
fprintf(stderr, "error: Cannot open image-file.\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
volume = openVolume(io);
|
||||
if(volume == NULL) {
|
||||
fprintf(stderr, "error: Cannot open volume.\n");
|
||||
CLOSE(io);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if(argc > 1) {
|
||||
if(strcmp(argv[2], "ls") == 0) {
|
||||
cmd_ls(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "cat") == 0) {
|
||||
cmd_cat(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "mv") == 0) {
|
||||
cmd_mv(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "symlink") == 0) {
|
||||
cmd_symlink(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "mkdir") == 0) {
|
||||
cmd_mkdir(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "add") == 0) {
|
||||
cmd_add(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "rm") == 0) {
|
||||
cmd_rm(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "chmod") == 0) {
|
||||
cmd_chmod(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "extract") == 0) {
|
||||
cmd_extract(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "extractall") == 0) {
|
||||
cmd_extractall(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "rmall") == 0) {
|
||||
cmd_rmall(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "addall") == 0) {
|
||||
cmd_addall(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "grow") == 0) {
|
||||
cmd_grow(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "getattr") == 0) {
|
||||
cmd_getattr(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "debug") == 0) {
|
||||
if(argc > 3 && strcmp(argv[3], "verbose") == 0) {
|
||||
debugBTree(volume->catalogTree, TRUE);
|
||||
} else {
|
||||
debugBTree(volume->catalogTree, FALSE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closeVolume(volume);
|
||||
CLOSE(io);
|
||||
|
||||
return 0;
|
||||
}
|
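The same calls the command-line driver wires together can be used directly as a library. A minimal sketch, assuming the bundled hfs headers declare these functions as they are used above; the image name is illustrative only.

#include <hfs/hfsplus.h>
#include <hfs/hfslib.h>

char endianness;   /* global expected by the hfs code; set the same way TestByteOrder() does above */

int main(void) {
	short int word = 0x0001;
	endianness = ((char*)&word)[0] ? IS_LITTLE_ENDIAN : IS_BIG_ENDIAN;

	io_func* io = openFlatFile("fs.img");   /* hypothetical image path */
	if(io == NULL)
		return 1;

	Volume* volume = openVolume(io);
	if(volume == NULL) {
		CLOSE(io);
		return 1;
	}

	hfs_ls(volume, "/");   /* same listing as the driver's "ls /" command */

	closeVolume(volume);
	CLOSE(io);
	return 0;
}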
@ -0,0 +1,292 @@
|
||||
#include <zlib.h>
|
||||
#include "common.h"
|
||||
#include <hfs/hfsplus.h>
|
||||
#include <hfs/hfscompress.h>
|
||||
|
||||
void flipHFSPlusDecmpfs(HFSPlusDecmpfs* compressData) {
|
||||
FLIPENDIANLE(compressData->magic);
|
||||
FLIPENDIANLE(compressData->flags);
|
||||
FLIPENDIANLE(compressData->size);
|
||||
}
|
||||
|
||||
void flipRsrcHead(HFSPlusCmpfRsrcHead* data) {
|
||||
FLIPENDIAN(data->headerSize);
|
||||
FLIPENDIAN(data->totalSize);
|
||||
FLIPENDIAN(data->dataSize);
|
||||
FLIPENDIAN(data->flags);
|
||||
}
|
||||
|
||||
void flipRsrcBlockHead(HFSPlusCmpfRsrcBlockHead* data) {
|
||||
FLIPENDIAN(data->dataSize);
|
||||
FLIPENDIANLE(data->numBlocks);
|
||||
}
|
||||
|
||||
void flipRsrcBlock(HFSPlusCmpfRsrcBlock* data) {
|
||||
FLIPENDIANLE(data->offset);
|
||||
FLIPENDIANLE(data->size);
|
||||
}
|
||||
|
||||
void flipHFSPlusCmpfEnd(HFSPlusCmpfEnd* data) {
|
||||
FLIPENDIAN(data->unk1);
|
||||
FLIPENDIAN(data->unk2);
|
||||
FLIPENDIAN(data->unk3);
|
||||
FLIPENDIAN(data->magic);
|
||||
FLIPENDIAN(data->flags);
|
||||
FLIPENDIANLE(data->size);
|
||||
FLIPENDIANLE(data->unk4);
|
||||
}
|
||||
|
||||
static int compressedRead(io_func* io, off_t location, size_t size, void *buffer) {
|
||||
HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
|
||||
size_t toRead;
|
||||
|
||||
while(size > 0) {
|
||||
if(data->cached && location >= data->cachedStart && location < data->cachedEnd) {
|
||||
if((data->cachedEnd - location) < size)
|
||||
toRead = data->cachedEnd - location;
|
||||
else
|
||||
toRead = size;
|
||||
|
||||
memcpy(buffer, data->cached + (location - data->cachedStart), toRead);
|
||||
|
||||
size -= toRead;
|
||||
location += toRead;
|
||||
buffer = ((uint8_t*) buffer) + toRead;
|
||||
}
|
||||
|
||||
if(size == 0)
|
||||
break;
|
||||
|
||||
// Try to cache
|
||||
uLongf actualSize;
|
||||
uint32_t block = location / 0x10000;
|
||||
uint8_t* compressed = (uint8_t*) malloc(data->blocks->blocks[block].size);
|
||||
if(!READ(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[block].offset, data->blocks->blocks[block].size, compressed)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
if(data->cached)
|
||||
free(data->cached);
|
||||
|
||||
data->cached = (uint8_t*) malloc(0x10000);
|
||||
actualSize = 0x10000;
|
||||
uncompress(data->cached, &actualSize, compressed, data->blocks->blocks[block].size);
|
||||
data->cachedStart = block * 0x10000;
|
||||
data->cachedEnd = data->cachedStart + actualSize;
|
||||
free(compressed);
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static int compressedWrite(io_func* io, off_t location, size_t size, void *buffer) {
|
||||
HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
|
||||
|
||||
if(data->cachedStart != 0 || data->cachedEnd != data->decmpfs->size) {
|
||||
// Cache entire file
|
||||
uint8_t* newCache = (uint8_t*) malloc(data->decmpfs->size);
|
||||
compressedRead(io, 0, data->decmpfs->size, newCache);
|
||||
if(data->cached)
|
||||
free(data->cached);
|
||||
|
||||
data->cached = newCache;
|
||||
data->cachedStart = 0;
|
||||
data->cachedEnd = data->decmpfs->size;
|
||||
}
|
||||
|
||||
if((location + size) > data->decmpfs->size) {
|
||||
data->decmpfs->size = location + size;
|
||||
data->cached = (uint8_t*) realloc(data->cached, data->decmpfs->size);
|
||||
data->cachedEnd = data->decmpfs->size;
|
||||
}
|
||||
|
||||
memcpy(data->cached + location, buffer, size);
|
||||
|
||||
data->dirty = TRUE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void closeHFSPlusCompressed(io_func* io) {
|
||||
HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
|
||||
|
||||
if(data->io)
|
||||
CLOSE(data->io);
|
||||
|
||||
if(data->dirty) {
|
||||
if(data->blocks)
|
||||
free(data->blocks);
|
||||
|
||||
data->decmpfs->magic = CMPFS_MAGIC;
|
||||
data->decmpfs->flags = 0x4;
|
||||
data->decmpfsSize = sizeof(HFSPlusDecmpfs);
|
||||
|
||||
uint32_t numBlocks = (data->decmpfs->size + 0xFFFF) / 0x10000;
|
||||
uint32_t blocksSize = sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock));
|
||||
data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock)));
|
||||
data->blocks->numBlocks = numBlocks;
|
||||
data->blocks->dataSize = blocksSize - sizeof(uint32_t); // without the front dataSize in BlockHead.
|
||||
|
||||
data->rsrcHead.headerSize = 0x100;
|
||||
data->rsrcHead.dataSize = blocksSize;
|
||||
data->rsrcHead.totalSize = data->rsrcHead.headerSize + data->rsrcHead.dataSize;
|
||||
data->rsrcHead.flags = 0x32;
|
||||
|
||||
uint8_t* buffer = (uint8_t*) malloc((0x10000 * 1.1) + 12);
|
||||
uint32_t curFileOffset = data->blocks->dataSize;
|
||||
uint32_t i;
|
||||
for(i = 0; i < numBlocks; i++) {
|
||||
data->blocks->blocks[i].offset = curFileOffset;
|
||||
uLongf actualSize = (0x10000 * 1.1) + 12;
|
||||
compress(buffer, &actualSize, data->cached + (0x10000 * i),
|
||||
(data->decmpfs->size - (0x10000 * i)) > 0x10000 ? 0x10000 : (data->decmpfs->size - (0x10000 * i)));
|
||||
data->blocks->blocks[i].size = actualSize;
|
||||
|
||||
// check if we can fit the whole thing into an inline extended attribute
|
||||
// a little fudge factor here since sizeof(HFSPlusAttrKey) is bigger than it ought to be, since only 127 characters are strictly allowed
|
||||
if(numBlocks <= 1 && (actualSize + sizeof(HFSPlusDecmpfs) + sizeof(HFSPlusAttrKey)) <= 0x1000) {
|
||||
data->decmpfs->flags = 0x3;
|
||||
memcpy(data->decmpfs->data, buffer, actualSize);
|
||||
data->decmpfsSize = sizeof(HFSPlusDecmpfs) + actualSize;
|
||||
printf("inline data\n");
|
||||
break;
|
||||
} else {
|
||||
if(i == 0) {
|
||||
data->io = openRawFile(data->file->fileID, &data->file->resourceFork, (HFSPlusCatalogRecord*)data->file, data->volume);
|
||||
if(!data->io) {
|
||||
hfs_panic("error opening resource fork");
|
||||
}
|
||||
}
|
||||
|
||||
WRITE(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[i].offset, data->blocks->blocks[i].size, buffer);
|
||||
|
||||
curFileOffset += data->blocks->blocks[i].size;
|
||||
data->blocks->dataSize += data->blocks->blocks[i].size;
|
||||
data->rsrcHead.dataSize += data->blocks->blocks[i].size;
|
||||
data->rsrcHead.totalSize += data->blocks->blocks[i].size;
|
||||
}
|
||||
}
|
||||
|
||||
free(buffer);
|
||||
|
||||
if(data->decmpfs->flags == 0x4) {
|
||||
flipRsrcHead(&data->rsrcHead);
|
||||
WRITE(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead);
|
||||
flipRsrcHead(&data->rsrcHead);
|
||||
|
||||
for(i = 0; i < data->blocks->numBlocks; i++) {
|
||||
flipRsrcBlock(&data->blocks->blocks[i]);
|
||||
}
|
||||
flipRsrcBlockHead(data->blocks);
|
||||
WRITE(data->io, data->rsrcHead.headerSize, blocksSize, data->blocks);
|
||||
flipRsrcBlockHead(data->blocks);
|
||||
for(i = 0; i < data->blocks->numBlocks; i++) {
|
||||
flipRsrcBlock(&data->blocks->blocks[i]);
|
||||
}
|
||||
|
||||
HFSPlusCmpfEnd end;
|
||||
memset(&end, 0, sizeof(HFSPlusCmpfEnd));
|
||||
end.unk1 = 0x1C;
|
||||
end.unk2 = 0x32;
|
||||
end.unk3 = 0x0;
|
||||
end.magic = CMPFS_MAGIC;
|
||||
end.flags = 0xA;
|
||||
end.size = 0xFFFF01;
|
||||
end.unk4 = 0x0;
|
||||
|
||||
flipHFSPlusCmpfEnd(&end);
|
||||
WRITE(data->io, data->rsrcHead.totalSize, sizeof(HFSPlusCmpfEnd), &end);
|
||||
flipHFSPlusCmpfEnd(&end);
|
||||
|
||||
CLOSE(data->io);
|
||||
}
|
||||
|
||||
flipHFSPlusDecmpfs(data->decmpfs);
|
||||
setAttribute(data->volume, data->file->fileID, "com.apple.decmpfs", (uint8_t*)(data->decmpfs), data->decmpfsSize);
|
||||
flipHFSPlusDecmpfs(data->decmpfs);
|
||||
}
|
||||
|
||||
if(data->cached)
|
||||
free(data->cached);
|
||||
|
||||
if(data->blocks)
|
||||
free(data->blocks);
|
||||
|
||||
free(data->decmpfs);
|
||||
free(data);
|
||||
free(io);
|
||||
}
|
||||
|
||||
io_func* openHFSPlusCompressed(Volume* volume, HFSPlusCatalogFile* file) {
|
||||
io_func* io;
|
||||
HFSPlusCompressed* data;
|
||||
uLongf actualSize;
|
||||
|
||||
io = (io_func*) malloc(sizeof(io_func));
|
||||
data = (HFSPlusCompressed*) malloc(sizeof(HFSPlusCompressed));
|
||||
|
||||
data->volume = volume;
|
||||
data->file = file;
|
||||
|
||||
io->data = data;
|
||||
io->read = &compressedRead;
|
||||
io->write = &compressedWrite;
|
||||
io->close = &closeHFSPlusCompressed;
|
||||
|
||||
data->cached = NULL;
|
||||
data->cachedStart = 0;
|
||||
data->cachedEnd = 0;
|
||||
data->io = NULL;
|
||||
data->blocks = NULL;
|
||||
data->dirty = FALSE;
|
||||
|
||||
data->decmpfsSize = getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&data->decmpfs));
|
||||
if(data->decmpfsSize == 0) {
|
||||
data->decmpfs = (HFSPlusDecmpfs*) malloc(0x1000);
|
||||
data->decmpfs->size = 0;
|
||||
return io; // previously not compressed file
|
||||
}
|
||||
|
||||
flipHFSPlusDecmpfs(data->decmpfs);
|
||||
|
||||
if(data->decmpfs->flags == 0x3) {
|
||||
data->cached = (uint8_t*) malloc(data->decmpfs->size);
|
||||
actualSize = data->decmpfs->size;
|
||||
uncompress(data->cached, &actualSize, data->decmpfs->data, data->decmpfsSize - sizeof(HFSPlusDecmpfs));
|
||||
if(actualSize != data->decmpfs->size) {
|
||||
fprintf(stderr, "decmpfs: size mismatch\n");
|
||||
}
|
||||
data->cachedStart = 0;
|
||||
data->cachedEnd = actualSize;
|
||||
} else {
|
||||
data->io = openRawFile(file->fileID, &file->resourceFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(!data->io) {
|
||||
hfs_panic("error opening resource fork");
|
||||
}
|
||||
|
||||
if(!READ(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
flipRsrcHead(&data->rsrcHead);
|
||||
|
||||
data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead));
|
||||
if(!READ(data->io, data->rsrcHead.headerSize, sizeof(HFSPlusCmpfRsrcBlockHead), data->blocks)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
flipRsrcBlockHead(data->blocks);
|
||||
|
||||
data->blocks = (HFSPlusCmpfRsrcBlockHead*) realloc(data->blocks, sizeof(HFSPlusCmpfRsrcBlockHead) + (sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks));
|
||||
if(!READ(data->io, data->rsrcHead.headerSize + sizeof(HFSPlusCmpfRsrcBlockHead), sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks, data->blocks->blocks)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
int i;
|
||||
for(i = 0; i < data->blocks->numBlocks; i++) {
|
||||
flipRsrcBlock(&data->blocks->blocks[i]);
|
||||
}
|
||||
}
|
||||
|
||||
return io;
|
||||
}
|
||||
|
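The compressed-fork layout handled above stores the plaintext in 0x10000-byte chunks, each deflated separately and addressed through the block table, and compressedRead() locates the chunk for a given offset with plain integer arithmetic. A small sketch of that mapping; the names and the sample offset are local to the example.

#include <inttypes.h>
#include <stdio.h>

#define CMPFS_CHUNK 0x10000   /* chunk size used by the code above */

int main(void) {
	uint64_t location = 0x2A345;                            /* arbitrary offset into the decompressed data */
	uint32_t block  = (uint32_t)(location / CMPFS_CHUNK);   /* which compressed chunk holds it */
	uint32_t within = (uint32_t)(location % CMPFS_CHUNK);   /* offset inside that chunk once inflated */
	/* prints: chunk 2, offset 0xa345 -- the reader inflates chunk 2 into its
	 * cache and copies from that offset within the cached data. */
	printf("chunk %" PRIu32 ", offset 0x%" PRIx32 "\n", block, within);
	return 0;
}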
723
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/hfslib.c
Normal file
@ -0,0 +1,723 @@
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <dirent.h>
|
||||
#include <time.h>
|
||||
#include <sys/types.h>
|
||||
#include "common.h"
|
||||
#include <hfs/hfsplus.h>
|
||||
#include <hfs/hfscompress.h>
|
||||
#include "abstractfile.h"
|
||||
#include <sys/stat.h>
|
||||
#include <inttypes.h>
|
||||
|
||||
#define BUFSIZE 1024*1024
|
||||
|
||||
static int silence = 0;
|
||||
|
||||
void hfs_setsilence(int s) {
|
||||
silence = s;
|
||||
}
|
||||
|
||||
void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume) {
|
||||
unsigned char* buffer;
|
||||
io_func* io;
|
||||
off_t curPosition;
|
||||
size_t bytesLeft;
|
||||
|
||||
buffer = (unsigned char*) malloc(BUFSIZE);
|
||||
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
io = openHFSPlusCompressed(volume, file);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
bytesLeft = ((HFSPlusCompressed*) io->data)->decmpfs->size;
|
||||
} else {
|
||||
io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
bytesLeft = file->dataFork.logicalSize;
|
||||
}
|
||||
while(bytesLeft > 0) {
|
||||
if(bytesLeft > BUFSIZE) {
|
||||
if(!READ(io, curPosition, BUFSIZE, buffer)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
if(output->write(output, buffer, BUFSIZE) != BUFSIZE) {
|
||||
hfs_panic("error writing");
|
||||
}
|
||||
curPosition += BUFSIZE;
|
||||
bytesLeft -= BUFSIZE;
|
||||
} else {
|
||||
if(!READ(io, curPosition, bytesLeft, buffer)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
if(output->write(output, buffer, bytesLeft) != bytesLeft) {
|
||||
hfs_panic("error writing");
|
||||
}
|
||||
curPosition += bytesLeft;
|
||||
bytesLeft -= bytesLeft;
|
||||
}
|
||||
}
|
||||
CLOSE(io);
|
||||
|
||||
free(buffer);
|
||||
}
|
||||
|
||||
void writeToHFSFile(HFSPlusCatalogFile* file, AbstractFile* input, Volume* volume) {
|
||||
unsigned char *buffer;
|
||||
io_func* io;
|
||||
off_t curPosition;
|
||||
off_t bytesLeft;
|
||||
|
||||
buffer = (unsigned char*) malloc(BUFSIZE);
|
||||
|
||||
bytesLeft = input->getLength(input);
|
||||
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
io = openHFSPlusCompressed(volume, file);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
allocate((RawFile*)io->data, bytesLeft);
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
|
||||
while(bytesLeft > 0) {
|
||||
if(bytesLeft > BUFSIZE) {
|
||||
if(input->read(input, buffer, BUFSIZE) != BUFSIZE) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
if(!WRITE(io, curPosition, BUFSIZE, buffer)) {
|
||||
hfs_panic("error writing");
|
||||
}
|
||||
curPosition += BUFSIZE;
|
||||
bytesLeft -= BUFSIZE;
|
||||
} else {
|
||||
if(input->read(input, buffer, (size_t)bytesLeft) != (size_t)bytesLeft) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
if(!WRITE(io, curPosition, (size_t)bytesLeft, buffer)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
curPosition += bytesLeft;
|
||||
bytesLeft -= bytesLeft;
|
||||
}
|
||||
}
|
||||
|
||||
CLOSE(io);
|
||||
|
||||
free(buffer);
|
||||
}
|
||||
|
||||
void get_hfs(Volume* volume, const char* inFileName, AbstractFile* output) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
|
||||
record = getRecordFromPath(inFileName, volume, NULL, NULL);
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFileRecord)
|
||||
writeToFile((HFSPlusCatalogFile*)record, output, volume);
|
||||
else {
|
||||
printf("Not a file\n");
|
||||
exit(0);
|
||||
}
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
free(record);
|
||||
}
|
||||
|
||||
int add_hfs(Volume* volume, AbstractFile* inFile, const char* outFileName) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
int ret;
|
||||
|
||||
record = getRecordFromPath(outFileName, volume, NULL, NULL);
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFileRecord) {
|
||||
writeToHFSFile((HFSPlusCatalogFile*)record, inFile, volume);
|
||||
ret = TRUE;
|
||||
} else {
|
||||
printf("Not a file\n");
|
||||
exit(0);
|
||||
}
|
||||
} else {
|
||||
if(newFile(outFileName, volume)) {
|
||||
record = getRecordFromPath(outFileName, volume, NULL, NULL);
|
||||
writeToHFSFile((HFSPlusCatalogFile*)record, inFile, volume);
|
||||
ret = TRUE;
|
||||
} else {
|
||||
ret = FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
inFile->close(inFile);
|
||||
if(record != NULL) {
|
||||
free(record);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void grow_hfs(Volume* volume, uint64_t newSize) {
|
||||
uint32_t newBlocks;
|
||||
uint32_t blocksToGrow;
|
||||
uint64_t newMapSize;
|
||||
uint64_t i;
|
||||
unsigned char zero;
|
||||
|
||||
zero = 0;
|
||||
|
||||
newBlocks = newSize / volume->volumeHeader->blockSize;
|
||||
|
||||
if(newBlocks <= volume->volumeHeader->totalBlocks) {
|
||||
printf("Cannot shrink volume\n");
|
||||
return;
|
||||
}
|
||||
|
||||
blocksToGrow = newBlocks - volume->volumeHeader->totalBlocks;
|
||||
newMapSize = newBlocks / 8;
|
||||
|
||||
if(volume->volumeHeader->allocationFile.logicalSize < newMapSize) {
|
||||
if(volume->volumeHeader->freeBlocks
|
||||
< ((newMapSize - volume->volumeHeader->allocationFile.logicalSize) / volume->volumeHeader->blockSize)) {
|
||||
printf("Not enough room to allocate new allocation map blocks\n");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
allocate((RawFile*) (volume->allocationFile->data), newMapSize);
|
||||
}
|
||||
|
||||
/* unreserve last block */
|
||||
setBlockUsed(volume, volume->volumeHeader->totalBlocks - 1, 0);
|
||||
/* don't need to increment freeBlocks because we will allocate another alternate volume header later on */
|
||||
|
||||
/* "unallocate" the new blocks */
|
||||
for(i = ((volume->volumeHeader->totalBlocks / 8) + 1); i < newMapSize; i++) {
|
||||
ASSERT(WRITE(volume->allocationFile, i, 1, &zero), "WRITE");
|
||||
}
|
||||
|
||||
/* grow backing store size */
|
||||
ASSERT(WRITE(volume->image, newSize - 1, 1, &zero), "WRITE");
|
||||
|
||||
/* write new volume information */
|
||||
volume->volumeHeader->totalBlocks = newBlocks;
|
||||
volume->volumeHeader->freeBlocks += blocksToGrow;
|
||||
|
||||
/* reserve last block */
|
||||
setBlockUsed(volume, volume->volumeHeader->totalBlocks - 1, 1);
|
||||
|
||||
updateVolume(volume);
|
||||
}
|
||||
|
||||
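grow_hfs() above sizes the allocation bitmap at one bit per allocation block, i.e. newBlocks / 8 bytes. A quick arithmetic sketch of the quantities involved; the image size and block size are made-up example values.

#include <stdint.h>
#include <stdio.h>

int main(void) {
	uint64_t newSize = 8ULL * 1024 * 1024 * 1024;   /* hypothetical new image size: 8 GiB */
	uint32_t blockSize = 4096;                      /* example HFS+ allocation block size */

	uint32_t newBlocks = (uint32_t)(newSize / blockSize);   /* as in grow_hfs() */
	uint64_t newMapSize = newBlocks / 8;                    /* one bit per block in the allocation file */

	/* prints: 2097152 blocks, 262144-byte allocation bitmap */
	printf("%u blocks, %llu-byte allocation bitmap\n",
	       newBlocks, (unsigned long long) newMapSize);
	return 0;
}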
void removeAllInFolder(HFSCatalogNodeID folderID, Volume* volume, const char* parentName) {
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* theList;
|
||||
char fullName[1024];
|
||||
char* name;
|
||||
char* pathComponent;
|
||||
int pathLen;
|
||||
char isRoot;
|
||||
|
||||
HFSPlusCatalogFolder* folder;
|
||||
theList = list = getFolderContents(folderID, volume);
|
||||
|
||||
strcpy(fullName, parentName);
|
||||
pathComponent = fullName + strlen(fullName);
|
||||
|
||||
isRoot = FALSE;
|
||||
if(strcmp(fullName, "/") == 0) {
|
||||
isRoot = TRUE;
|
||||
}
|
||||
|
||||
while(list != NULL) {
|
||||
name = unicodeToAscii(&list->name);
|
||||
if(isRoot && (name[0] == '\0' || strncmp(name, ".HFS+ Private Directory Data", sizeof(".HFS+ Private Directory Data") - 1) == 0)) {
|
||||
free(name);
|
||||
list = list->next;
|
||||
continue;
|
||||
}
|
||||
|
||||
strcpy(pathComponent, name);
|
||||
pathLen = strlen(fullName);
|
||||
|
||||
if(list->record->recordType == kHFSPlusFolderRecord) {
|
||||
folder = (HFSPlusCatalogFolder*)list->record;
|
||||
fullName[pathLen] = '/';
|
||||
fullName[pathLen + 1] = '\0';
|
||||
removeAllInFolder(folder->folderID, volume, fullName);
|
||||
} else {
|
||||
printf("%s\n", fullName);
|
||||
removeFile(fullName, volume);
|
||||
}
|
||||
|
||||
free(name);
|
||||
list = list->next;
|
||||
}
|
||||
|
||||
releaseCatalogRecordList(theList);
|
||||
|
||||
if(!isRoot) {
|
||||
*(pathComponent - 1) = '\0';
|
||||
printf("%s\n", fullName);
|
||||
removeFile(fullName, volume);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void addAllInFolder(HFSCatalogNodeID folderID, Volume* volume, const char* parentName) {
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* theList;
|
||||
char cwd[1024];
|
||||
char fullName[1024];
|
||||
char testBuffer[1024];
|
||||
char* pathComponent;
|
||||
int pathLen;
|
||||
|
||||
char* name;
|
||||
|
||||
DIR* dir;
|
||||
DIR* tmp;
|
||||
|
||||
HFSCatalogNodeID cnid;
|
||||
|
||||
struct dirent* ent;
|
||||
|
||||
AbstractFile* file;
|
||||
HFSPlusCatalogFile* outFile;
|
||||
|
||||
strcpy(fullName, parentName);
|
||||
pathComponent = fullName + strlen(fullName);
|
||||
|
||||
ASSERT(getcwd(cwd, 1024) != NULL, "cannot get current working directory");
|
||||
|
||||
theList = list = getFolderContents(folderID, volume);
|
||||
|
||||
ASSERT((dir = opendir(cwd)) != NULL, "opendir");
|
||||
|
||||
while((ent = readdir(dir)) != NULL) {
|
||||
if(ent->d_name[0] == '.' && (ent->d_name[1] == '\0' || (ent->d_name[1] == '.' && ent->d_name[2] == '\0'))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
strcpy(pathComponent, ent->d_name);
|
||||
pathLen = strlen(fullName);
|
||||
|
||||
cnid = 0;
|
||||
list = theList;
|
||||
while(list != NULL) {
|
||||
name = unicodeToAscii(&list->name);
|
||||
if(strcmp(name, ent->d_name) == 0) {
|
||||
cnid = (list->record->recordType == kHFSPlusFolderRecord) ? (((HFSPlusCatalogFolder*)list->record)->folderID)
|
||||
: (((HFSPlusCatalogFile*)list->record)->fileID);
|
||||
free(name);
|
||||
break;
|
||||
}
|
||||
free(name);
|
||||
list = list->next;
|
||||
}
|
||||
|
||||
if((tmp = opendir(ent->d_name)) != NULL) {
|
||||
closedir(tmp);
|
||||
printf("folder: %s\n", fullName); fflush(stdout);
|
||||
|
||||
if(cnid == 0) {
|
||||
cnid = newFolder(fullName, volume);
|
||||
}
|
||||
|
||||
fullName[pathLen] = '/';
|
||||
fullName[pathLen + 1] = '\0';
|
||||
ASSERT(chdir(ent->d_name) == 0, "chdir");
|
||||
addAllInFolder(cnid, volume, fullName);
|
||||
ASSERT(chdir(cwd) == 0, "chdir");
|
||||
} else {
|
||||
printf("file: %s\n", fullName); fflush(stdout);
|
||||
if(cnid == 0) {
|
||||
cnid = newFile(fullName, volume);
|
||||
}
|
||||
file = createAbstractFileFromFile(fopen(ent->d_name, "rb"));
|
||||
ASSERT(file != NULL, "fopen");
|
||||
outFile = (HFSPlusCatalogFile*)getRecordByCNID(cnid, volume);
|
||||
writeToHFSFile(outFile, file, volume);
|
||||
file->close(file);
|
||||
free(outFile);
|
||||
|
||||
if(strncmp(fullName, "/Applications/", sizeof("/Applications/") - 1) == 0) {
|
||||
testBuffer[0] = '\0';
|
||||
strcpy(testBuffer, "/Applications/");
|
||||
strcat(testBuffer, ent->d_name);
|
||||
strcat(testBuffer, ".app/");
|
||||
strcat(testBuffer, ent->d_name);
|
||||
if(strcmp(testBuffer, fullName) == 0) {
|
||||
if(strcmp(ent->d_name, "Installer") == 0
|
||||
|| strcmp(ent->d_name, "BootNeuter") == 0
|
||||
) {
|
||||
printf("Giving setuid permissions to %s...\n", fullName); fflush(stdout);
|
||||
chmodFile(fullName, 04755, volume);
|
||||
} else {
|
||||
printf("Giving permissions to %s\n", fullName); fflush(stdout);
|
||||
chmodFile(fullName, 0755, volume);
|
||||
}
|
||||
}
|
||||
} else if(strncmp(fullName, "/bin/", sizeof("/bin/") - 1) == 0
|
||||
|| strncmp(fullName, "/Applications/BootNeuter.app/bin/", sizeof("/Applications/BootNeuter.app/bin/") - 1) == 0
|
||||
|| strncmp(fullName, "/sbin/", sizeof("/sbin/") - 1) == 0
|
||||
|| strncmp(fullName, "/usr/sbin/", sizeof("/usr/sbin/") - 1) == 0
|
||||
|| strncmp(fullName, "/usr/bin/", sizeof("/usr/bin/") - 1) == 0
|
||||
|| strncmp(fullName, "/usr/libexec/", sizeof("/usr/libexec/") - 1) == 0
|
||||
|| strncmp(fullName, "/usr/local/bin/", sizeof("/usr/local/bin/") - 1) == 0
|
||||
|| strncmp(fullName, "/usr/local/sbin/", sizeof("/usr/local/sbin/") - 1) == 0
|
||||
|| strncmp(fullName, "/usr/local/libexec/", sizeof("/usr/local/libexec/") - 1) == 0
|
||||
) {
|
||||
chmodFile(fullName, 0755, volume);
|
||||
printf("Giving permissions to %s\n", fullName); fflush(stdout);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
|
||||
releaseCatalogRecordList(theList);
|
||||
}
|
||||
|
||||
void extractAllInFolder(HFSCatalogNodeID folderID, Volume* volume) {
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* theList;
|
||||
char cwd[1024];
|
||||
char* name;
|
||||
HFSPlusCatalogFolder* folder;
|
||||
HFSPlusCatalogFile* file;
|
||||
AbstractFile* outFile;
|
||||
struct stat status;
|
||||
|
||||
ASSERT(getcwd(cwd, 1024) != NULL, "cannot get current working directory");
|
||||
|
||||
theList = list = getFolderContents(folderID, volume);
|
||||
|
||||
while(list != NULL) {
|
||||
name = unicodeToAscii(&list->name);
|
||||
if(strncmp(name, ".HFS+ Private Directory Data", sizeof(".HFS+ Private Directory Data") - 1) == 0 || name[0] == '\0') {
|
||||
free(name);
|
||||
list = list->next;
|
||||
continue;
|
||||
}
|
||||
|
||||
if(list->record->recordType == kHFSPlusFolderRecord) {
|
||||
folder = (HFSPlusCatalogFolder*)list->record;
|
||||
printf("folder: %s\n", name);
|
||||
if(stat(name, &status) != 0) {
|
||||
ASSERT(mkdir(name, 0755) == 0, "mkdir");
|
||||
}
|
||||
ASSERT(chdir(name) == 0, "chdir");
|
||||
extractAllInFolder(folder->folderID, volume);
|
||||
ASSERT(chdir(cwd) == 0, "chdir");
|
||||
} else if(list->record->recordType == kHFSPlusFileRecord) {
|
||||
printf("file: %s\n", name);
|
||||
file = (HFSPlusCatalogFile*)list->record;
|
||||
outFile = createAbstractFileFromFile(fopen(name, "wb"));
|
||||
if(outFile != NULL) {
|
||||
writeToFile(file, outFile, volume);
|
||||
outFile->close(outFile);
|
||||
} else {
|
||||
printf("WARNING: cannot fopen %s\n", name);
|
||||
}
|
||||
}
|
||||
|
||||
free(name);
|
||||
list = list->next;
|
||||
}
|
||||
releaseCatalogRecordList(theList);
|
||||
}
|
||||
|
||||
|
||||
void addall_hfs(Volume* volume, const char* dirToMerge, const char* dest) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
char* name;
|
||||
char cwd[1024];
|
||||
char initPath[1024];
|
||||
int lastCharOfPath;
|
||||
|
||||
ASSERT(getcwd(cwd, 1024) != NULL, "cannot get current working directory");
|
||||
|
||||
if(chdir(dirToMerge) != 0) {
|
||||
printf("Cannot open that directory: %s\n", dirToMerge);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
record = getRecordFromPath(dest, volume, &name, NULL);
|
||||
strcpy(initPath, dest);
|
||||
lastCharOfPath = strlen(dest) - 1;
|
||||
if(dest[lastCharOfPath] != '/') {
|
||||
initPath[lastCharOfPath + 1] = '/';
|
||||
initPath[lastCharOfPath + 2] = '\0';
|
||||
}
|
||||
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFolderRecord)
|
||||
addAllInFolder(((HFSPlusCatalogFolder*)record)->folderID, volume, initPath);
|
||||
else {
|
||||
printf("Not a folder\n");
|
||||
exit(0);
|
||||
}
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
exit(0);
|
||||
}
|
||||
|
||||
ASSERT(chdir(cwd) == 0, "chdir");
|
||||
free(record);
|
||||
|
||||
}
|
||||
|
||||
int copyAcrossVolumes(Volume* volume1, Volume* volume2, char* path1, char* path2) {
|
||||
void* buffer;
|
||||
size_t bufferSize;
|
||||
AbstractFile* tmpFile;
|
||||
int ret;
|
||||
|
||||
buffer = malloc(1);
|
||||
bufferSize = 0;
|
||||
tmpFile = createAbstractFileFromMemoryFile((void**)&buffer, &bufferSize);
|
||||
|
||||
if(!silence)
|
||||
{
|
||||
printf("retrieving... "); fflush(stdout);
|
||||
}
|
||||
|
||||
get_hfs(volume1, path1, tmpFile);
|
||||
tmpFile->seek(tmpFile, 0);
|
||||
|
||||
if(!silence)
|
||||
{
|
||||
printf("writing (%ld)... ", (long) tmpFile->getLength(tmpFile)); fflush(stdout);
|
||||
}
|
||||
|
||||
ret = add_hfs(volume2, tmpFile, path2);
|
||||
|
||||
if(!silence)
|
||||
{
|
||||
printf("done\n");
|
||||
}
|
||||
|
||||
free(buffer);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void displayFolder(HFSCatalogNodeID folderID, Volume* volume) {
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* theList;
|
||||
HFSPlusCatalogFolder* folder;
|
||||
HFSPlusCatalogFile* file;
|
||||
time_t fileTime;
|
||||
struct tm *date;
|
||||
HFSPlusDecmpfs* compressData;
|
||||
size_t attrSize;
|
||||
|
||||
theList = list = getFolderContents(folderID, volume);
|
||||
|
||||
while(list != NULL) {
|
||||
if(list->record->recordType == kHFSPlusFolderRecord) {
|
||||
folder = (HFSPlusCatalogFolder*)list->record;
|
||||
printf("%06o ", folder->permissions.fileMode);
|
||||
printf("%3d ", folder->permissions.ownerID);
|
||||
printf("%3d ", folder->permissions.groupID);
|
||||
printf("%12d ", folder->valence);
|
||||
fileTime = APPLE_TO_UNIX_TIME(folder->contentModDate);
|
||||
} else if(list->record->recordType == kHFSPlusFileRecord) {
|
||||
file = (HFSPlusCatalogFile*)list->record;
|
||||
printf("%06o ", file->permissions.fileMode);
|
||||
printf("%3d ", file->permissions.ownerID);
|
||||
printf("%3d ", file->permissions.groupID);
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
attrSize = getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&compressData));
|
||||
flipHFSPlusDecmpfs(compressData);
|
||||
printf("%12" PRId64 " ", compressData->size);
|
||||
free(compressData);
|
||||
} else {
|
||||
printf("%12" PRId64 " ", file->dataFork.logicalSize);
|
||||
}
|
||||
fileTime = APPLE_TO_UNIX_TIME(file->contentModDate);
|
||||
}
|
||||
|
||||
date = localtime(&fileTime);
|
||||
if(date != NULL) {
|
||||
printf("%2d/%2d/%4d %02d:%02d ", date->tm_mon, date->tm_mday, date->tm_year + 1900, date->tm_hour, date->tm_min);
|
||||
} else {
|
||||
printf(" ");
|
||||
}
|
||||
|
||||
printUnicode(&list->name);
|
||||
printf("\n");
|
||||
|
||||
list = list->next;
|
||||
}
|
||||
|
||||
releaseCatalogRecordList(theList);
|
||||
}
|
||||
|
||||
void displayFileLSLine(Volume* volume, HFSPlusCatalogFile* file, const char* name) {
|
||||
time_t fileTime;
|
||||
struct tm *date;
|
||||
HFSPlusDecmpfs* compressData;
|
||||
|
||||
printf("%06o ", file->permissions.fileMode);
|
||||
printf("%3d ", file->permissions.ownerID);
|
||||
printf("%3d ", file->permissions.groupID);
|
||||
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&compressData));
|
||||
flipHFSPlusDecmpfs(compressData);
|
||||
printf("%12" PRId64 " ", compressData->size);
|
||||
free(compressData);
|
||||
} else {
|
||||
printf("%12" PRId64 " ", file->dataFork.logicalSize);
|
||||
}
|
||||
|
||||
fileTime = APPLE_TO_UNIX_TIME(file->contentModDate);
|
||||
date = localtime(&fileTime);
|
||||
if(date != NULL) {
|
||||
printf("%2d/%2d/%4d %2d:%02d ", date->tm_mon, date->tm_mday, date->tm_year + 1900, date->tm_hour, date->tm_min);
|
||||
} else {
|
||||
printf(" ");
|
||||
}
|
||||
printf("%s\n", name);
|
||||
|
||||
XAttrList* next;
|
||||
XAttrList* attrs = getAllExtendedAttributes(file->fileID, volume);
|
||||
if(attrs != NULL) {
|
||||
printf("Extended attributes\n");
|
||||
while(attrs != NULL) {
|
||||
next = attrs->next;
|
||||
printf("\t%s\n", attrs->name);
|
||||
free(attrs->name);
|
||||
free(attrs);
|
||||
attrs = next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void hfs_ls(Volume* volume, const char* path) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
char* name;
|
||||
|
||||
record = getRecordFromPath(path, volume, &name, NULL);
|
||||
|
||||
printf("%s: \n", name);
|
||||
if(record != NULL) {
|
||||
if(record->recordType == kHFSPlusFolderRecord)
|
||||
displayFolder(((HFSPlusCatalogFolder*)record)->folderID, volume);
|
||||
else
|
||||
displayFileLSLine(volume, (HFSPlusCatalogFile*)record, name);
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
|
||||
printf("Total filesystem size: %d, free: %d\n", (volume->volumeHeader->totalBlocks - volume->volumeHeader->freeBlocks) * volume->volumeHeader->blockSize, volume->volumeHeader->freeBlocks * volume->volumeHeader->blockSize);
|
||||
|
||||
free(record);
|
||||
}
|
||||
|
||||
void hfs_untar(Volume* volume, AbstractFile* tarFile) {
|
||||
size_t tarSize = tarFile->getLength(tarFile);
|
||||
size_t curRecord = 0;
|
||||
char block[512];
|
||||
|
||||
while(curRecord < tarSize) {
|
||||
tarFile->seek(tarFile, curRecord);
|
||||
tarFile->read(tarFile, block, 512);
|
||||
|
||||
uint32_t mode = 0;
|
||||
char* fileName = NULL;
|
||||
const char* target = NULL;
|
||||
uint32_t type = 0;
|
||||
uint32_t size;
|
||||
uint32_t uid;
|
||||
uint32_t gid;
|
||||
|
||||
sscanf(&block[100], "%o", &mode);
|
||||
fileName = &block[0];
|
||||
sscanf(&block[156], "%o", &type);
|
||||
target = &block[157];
|
||||
sscanf(&block[124], "%o", &size);
|
||||
sscanf(&block[108], "%o", &uid);
|
||||
sscanf(&block[116], "%o", &gid);
|
||||
|
||||
if(fileName[0] == '\0')
|
||||
break;
|
||||
|
||||
if(fileName[0] == '.' && fileName[1] == '/') {
|
||||
fileName += 2;
|
||||
}
|
||||
|
||||
if(fileName[0] == '\0')
|
||||
goto loop;
|
||||
|
||||
if(fileName[strlen(fileName) - 1] == '/')
|
||||
fileName[strlen(fileName) - 1] = '\0';
|
||||
|
||||
HFSPlusCatalogRecord* record = getRecordFromPath3(fileName, volume, NULL, NULL, TRUE, FALSE, kHFSRootFolderID);
|
||||
if(record) {
|
||||
if(record->recordType == kHFSPlusFolderRecord || type == 5) {
|
||||
if(!silence)
|
||||
printf("ignoring %s, type = %d\n", fileName, type);
|
||||
free(record);
|
||||
goto loop;
|
||||
} else {
|
||||
printf("replacing %s\n", fileName);
|
||||
free(record);
|
||||
removeFile(fileName, volume);
|
||||
}
|
||||
}
|
||||
|
||||
if(type == 0) {
|
||||
if(!silence)
|
||||
printf("file: %s (%04o), size = %d\n", fileName, mode, size);
|
||||
void* buffer = malloc(size);
|
||||
tarFile->seek(tarFile, curRecord + 512);
|
||||
tarFile->read(tarFile, buffer, size);
|
||||
AbstractFile* inFile = createAbstractFileFromMemory(&buffer, size);
|
||||
add_hfs(volume, inFile, fileName);
|
||||
free(buffer);
|
||||
} else if(type == 5) {
|
||||
if(!silence)
|
||||
printf("directory: %s (%04o)\n", fileName, mode);
|
||||
newFolder(fileName, volume);
|
||||
} else if(type == 2) {
|
||||
if(!silence)
|
||||
printf("symlink: %s (%04o) -> %s\n", fileName, mode, target);
|
||||
makeSymlink(fileName, target, volume);
|
||||
}
|
||||
|
||||
chmodFile(fileName, mode, volume);
|
||||
chownFile(fileName, uid, gid, volume);
|
||||
|
||||
loop:
|
||||
|
||||
curRecord = (curRecord + 512) + ((size + 511) / 512 * 512);
|
||||
}
|
||||
|
||||
}
|
||||
|
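hfs_untar() above parses each 512-byte ustar header by fixed offsets: name at 0, mode at 100, uid at 108, gid at 116, size at 124, type flag at 156 and link target at 157, all stored as octal text. A compact sketch of the same field extraction on a standalone header block, assuming a well-formed archive; the helper name is local to the example.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the offsets hfs_untar() reads from each 512-byte tar record. */
static void parse_tar_header(const char block[512]) {
	uint32_t mode = 0, size = 0, uid = 0, gid = 0, type = 0;

	sscanf(&block[100], "%o", &mode);   /* file mode, octal text */
	sscanf(&block[108], "%o", &uid);
	sscanf(&block[116], "%o", &gid);
	sscanf(&block[124], "%o", &size);   /* payload size, octal text */
	sscanf(&block[156], "%o", &type);   /* 0 = file, 2 = symlink, 5 = directory */

	printf("%s: mode %04o, %u bytes, type %u, uid/gid %u/%u\n",
	       &block[0], mode, size, type, uid, gid);
}

As in the function above, the next header then lives at the current offset plus 512 plus the payload rounded up to a 512-byte boundary.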
502
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/rawfile.c
Normal file
@ -0,0 +1,502 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
int writeExtents(RawFile* rawFile);
|
||||
|
||||
int isBlockUsed(Volume* volume, uint32_t block)
|
||||
{
|
||||
unsigned char byte;
|
||||
|
||||
READ(volume->allocationFile, block / 8, 1, &byte);
|
||||
return (byte & (1 << (7 - (block % 8)))) != 0;
|
||||
}
|
||||
|
||||
int setBlockUsed(Volume* volume, uint32_t block, int used) {
|
||||
unsigned char byte;
|
||||
|
||||
READ(volume->allocationFile, block / 8, 1, &byte);
|
||||
if(used) {
|
||||
byte |= (1 << (7 - (block % 8)));
|
||||
} else {
|
||||
byte &= ~(1 << (7 - (block % 8)));
|
||||
}
|
||||
ASSERT(WRITE(volume->allocationFile, block / 8, 1, &byte), "WRITE");
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
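isBlockUsed() and setBlockUsed() above address the allocation file as a bitmap: byte index block / 8, bit 7 - (block % 8) counting from the most significant bit. A small standalone sketch of the same bit arithmetic; the block number is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

int main(void) {
	unsigned char byte = 0x00;
	uint32_t block = 10;                      /* example allocation block number */

	/* Mark the block used, exactly as setBlockUsed() does on the byte it read. */
	byte |= (1 << (7 - (block % 8)));
	printf("byte %u becomes 0x%02x\n", block / 8, byte);   /* byte 1 becomes 0x20 */

	/* Test it again, as isBlockUsed() does. */
	printf("used: %d\n", (byte & (1 << (7 - (block % 8)))) != 0);
	return 0;
}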
int allocate(RawFile* rawFile, off_t size) {
|
||||
unsigned char* zeros;
|
||||
Volume* volume;
|
||||
HFSPlusForkData* forkData;
|
||||
uint32_t blocksNeeded;
|
||||
uint32_t blocksToAllocate;
|
||||
Extent* extent;
|
||||
Extent* lastExtent;
|
||||
|
||||
uint32_t curBlock;
|
||||
|
||||
volume = rawFile->volume;
|
||||
forkData = rawFile->forkData;
|
||||
extent = rawFile->extents;
|
||||
|
||||
blocksNeeded = ((uint64_t)size / (uint64_t)volume->volumeHeader->blockSize) + (((size % volume->volumeHeader->blockSize) == 0) ? 0 : 1);
|
||||
|
||||
if(blocksNeeded > forkData->totalBlocks) {
|
||||
zeros = (unsigned char*) malloc(volume->volumeHeader->blockSize);
|
||||
memset(zeros, 0, volume->volumeHeader->blockSize);
|
||||
|
||||
blocksToAllocate = blocksNeeded - forkData->totalBlocks;
|
||||
|
||||
if(blocksToAllocate > volume->volumeHeader->freeBlocks) {
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
lastExtent = NULL;
|
||||
while(extent != NULL) {
|
||||
lastExtent = extent;
|
||||
extent = extent->next;
|
||||
}
|
||||
|
||||
if(lastExtent == NULL) {
|
||||
rawFile->extents = (Extent*) malloc(sizeof(Extent));
|
||||
lastExtent = rawFile->extents;
|
||||
lastExtent->blockCount = 0;
|
||||
lastExtent->next = NULL;
|
||||
curBlock = volume->volumeHeader->nextAllocation;
|
||||
} else {
|
||||
curBlock = lastExtent->startBlock + lastExtent->blockCount;
|
||||
}
|
||||
|
||||
while(blocksToAllocate > 0) {
|
||||
if(isBlockUsed(volume, curBlock)) {
|
||||
if(lastExtent->blockCount > 0) {
|
||||
lastExtent->next = (Extent*) malloc(sizeof(Extent));
|
||||
lastExtent = lastExtent->next;
|
||||
lastExtent->blockCount = 0;
|
||||
lastExtent->next = NULL;
|
||||
}
|
||||
curBlock = volume->volumeHeader->nextAllocation;
|
||||
volume->volumeHeader->nextAllocation++;
|
||||
if(volume->volumeHeader->nextAllocation >= volume->volumeHeader->totalBlocks) {
|
||||
volume->volumeHeader->nextAllocation = 0;
|
||||
}
|
||||
} else {
|
||||
if(lastExtent->blockCount == 0) {
|
||||
lastExtent->startBlock = curBlock;
|
||||
}
|
||||
|
||||
/* zero out allocated block */
|
||||
ASSERT(WRITE(volume->image, curBlock * volume->volumeHeader->blockSize, volume->volumeHeader->blockSize, zeros), "WRITE");
|
||||
|
||||
setBlockUsed(volume, curBlock, TRUE);
|
||||
volume->volumeHeader->freeBlocks--;
|
||||
blocksToAllocate--;
|
||||
curBlock++;
|
||||
lastExtent->blockCount++;
|
||||
|
||||
if(curBlock >= volume->volumeHeader->totalBlocks) {
|
||||
curBlock = volume->volumeHeader->nextAllocation;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(zeros);
|
||||
} else if(blocksNeeded < forkData->totalBlocks) {
|
||||
blocksToAllocate = blocksNeeded;
|
||||
|
||||
lastExtent = NULL;
|
||||
|
||||
while(blocksToAllocate > 0) {
|
||||
if(blocksToAllocate > extent->blockCount) {
|
||||
blocksToAllocate -= extent->blockCount;
|
||||
lastExtent = extent;
|
||||
extent = extent->next;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if(blocksToAllocate == 0 && lastExtent != NULL) {
|
||||
// snip the extent list here, since we don't need the rest
|
||||
lastExtent->next = NULL;
|
||||
} else if(blocksNeeded == 0) {
|
||||
rawFile->extents = NULL;
|
||||
}
|
||||
|
||||
do {
|
||||
for(curBlock = (extent->startBlock + blocksToAllocate); curBlock < (extent->startBlock + extent->blockCount); curBlock++) {
|
||||
setBlockUsed(volume, curBlock, FALSE);
|
||||
volume->volumeHeader->freeBlocks++;
|
||||
}
|
||||
lastExtent = extent;
|
||||
extent = extent->next;
|
||||
|
||||
if(blocksToAllocate == 0)
|
||||
{
|
||||
free(lastExtent);
|
||||
} else {
|
||||
lastExtent->next = NULL;
|
||||
lastExtent->blockCount = blocksToAllocate;
|
||||
}
|
||||
|
||||
blocksToAllocate = 0;
|
||||
} while(extent != NULL);
|
||||
}
|
||||
|
||||
writeExtents(rawFile);
|
||||
|
||||
forkData->logicalSize = size;
|
||||
forkData->totalBlocks = blocksNeeded;
|
||||
|
||||
updateVolume(rawFile->volume);
|
||||
|
||||
if(rawFile->catalogRecord != NULL) {
|
||||
updateCatalog(rawFile->volume, rawFile->catalogRecord);
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static int rawFileRead(io_func* io,off_t location, size_t size, void *buffer) {
|
||||
RawFile* rawFile;
|
||||
Volume* volume;
|
||||
Extent* extent;
|
||||
|
||||
size_t blockSize;
|
||||
off_t fileLoc;
|
||||
off_t locationInBlock;
|
||||
size_t possible;
|
||||
|
||||
rawFile = (RawFile*) io->data;
|
||||
volume = rawFile->volume;
|
||||
blockSize = volume->volumeHeader->blockSize;
|
||||
|
||||
if(!rawFile->extents)
|
||||
return FALSE;
|
||||
|
||||
extent = rawFile->extents;
|
||||
fileLoc = 0;
|
||||
|
||||
locationInBlock = location;
|
||||
while(TRUE) {
|
||||
fileLoc += extent->blockCount * blockSize;
|
||||
if(fileLoc <= location) {
|
||||
locationInBlock -= extent->blockCount * blockSize;
|
||||
extent = extent->next;
|
||||
if(extent == NULL)
|
||||
break;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
while(size > 0) {
|
||||
if(extent == NULL)
|
||||
return FALSE;
|
||||
|
||||
possible = extent->blockCount * blockSize - locationInBlock;
|
||||
|
||||
if(size > possible) {
|
||||
ASSERT(READ(volume->image, extent->startBlock * blockSize + locationInBlock, possible, buffer), "READ");
|
||||
size -= possible;
|
||||
buffer = (void*)(((size_t)buffer) + possible);
|
||||
extent = extent->next;
|
||||
} else {
|
||||
ASSERT(READ(volume->image, extent->startBlock * blockSize + locationInBlock, size, buffer), "READ");
|
||||
break;
|
||||
}
|
||||
|
||||
locationInBlock = 0;
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static int rawFileWrite(io_func* io,off_t location, size_t size, void *buffer) {
|
||||
RawFile* rawFile;
|
||||
Volume* volume;
|
||||
Extent* extent;
|
||||
|
||||
size_t blockSize;
|
||||
off_t fileLoc;
|
||||
off_t locationInBlock;
|
||||
size_t possible;
|
||||
|
||||
rawFile = (RawFile*) io->data;
|
||||
volume = rawFile->volume;
|
||||
blockSize = volume->volumeHeader->blockSize;
|
||||
|
||||
if(rawFile->forkData->logicalSize < (location + size)) {
|
||||
ASSERT(allocate(rawFile, location + size), "allocate");
|
||||
}
|
||||
|
||||
extent = rawFile->extents;
|
||||
fileLoc = 0;
|
||||
|
||||
locationInBlock = location;
|
||||
while(TRUE) {
|
||||
fileLoc += extent->blockCount * blockSize;
|
||||
if(fileLoc <= location) {
|
||||
locationInBlock -= extent->blockCount * blockSize;
|
||||
extent = extent->next;
|
||||
if(extent == NULL)
|
||||
break;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
while(size > 0) {
|
||||
if(extent == NULL)
|
||||
return FALSE;
|
||||
|
||||
possible = extent->blockCount * blockSize - locationInBlock;
|
||||
|
||||
if(size > possible) {
|
||||
ASSERT(WRITE(volume->image, extent->startBlock * blockSize + locationInBlock, possible, buffer), "WRITE");
|
||||
size -= possible;
|
||||
buffer = (void*)(((size_t)buffer) + possible);
|
||||
extent = extent->next;
|
||||
} else {
|
||||
ASSERT(WRITE(volume->image, extent->startBlock * blockSize + locationInBlock, size, buffer), "WRITE");
|
||||
break;
|
||||
}
|
||||
|
||||
locationInBlock = 0;
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
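/* Note: rawFileRead() and rawFileWrite() above translate a logical file offset
   into volume-relative block offsets by walking the in-memory extent list;
   rawFileWrite() first calls allocate() when a write extends past the fork's
   current logicalSize. */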
|
||||
static void closeRawFile(io_func* io) {
|
||||
RawFile* rawFile;
|
||||
Extent* extent;
|
||||
Extent* toRemove;
|
||||
|
||||
rawFile = (RawFile*) io->data;
|
||||
extent = rawFile->extents;
|
||||
|
||||
while(extent != NULL) {
|
||||
toRemove = extent;
|
||||
extent = extent->next;
|
||||
free(toRemove);
|
||||
}
|
||||
|
||||
free(rawFile);
|
||||
free(io);
|
||||
}
|
||||
|
||||
int removeExtents(RawFile* rawFile) {
|
||||
uint32_t blocksLeft;
|
||||
HFSPlusForkData* forkData;
|
||||
uint32_t currentBlock;
|
||||
|
||||
uint32_t startBlock;
|
||||
uint32_t blockCount;
|
||||
|
||||
HFSPlusExtentDescriptor* descriptor;
|
||||
int currentExtent;
|
||||
HFSPlusExtentKey extentKey;
|
||||
int exact;
|
||||
|
||||
extentKey.keyLength = sizeof(HFSPlusExtentKey) - sizeof(extentKey.keyLength);
|
||||
extentKey.forkType = 0;
|
||||
extentKey.fileID = rawFile->id;
|
||||
|
||||
forkData = rawFile->forkData;
|
||||
blocksLeft = forkData->totalBlocks;
|
||||
currentExtent = 0;
|
||||
currentBlock = 0;
|
||||
descriptor = (HFSPlusExtentDescriptor*) forkData->extents;
|
||||
|
||||
while(blocksLeft > 0) {
|
||||
if(currentExtent == 8) {
|
||||
if(rawFile->volume->extentsTree == NULL) {
|
||||
hfs_panic("no extents overflow file loaded yet!");
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if(descriptor != ((HFSPlusExtentDescriptor*) forkData->extents)) {
|
||||
free(descriptor);
|
||||
}
|
||||
|
||||
extentKey.startBlock = currentBlock;
|
||||
descriptor = (HFSPlusExtentDescriptor*) search(rawFile->volume->extentsTree, (BTKey*)(&extentKey), &exact, NULL, NULL);
|
||||
if(descriptor == NULL || exact == FALSE) {
|
||||
hfs_panic("inconsistent extents information!");
|
||||
return FALSE;
|
||||
} else {
|
||||
removeFromBTree(rawFile->volume->extentsTree, (BTKey*)(&extentKey));
|
||||
currentExtent = 0;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
startBlock = descriptor[currentExtent].startBlock;
|
||||
blockCount = descriptor[currentExtent].blockCount;
|
||||
|
||||
currentBlock += blockCount;
|
||||
blocksLeft -= blockCount;
|
||||
currentExtent++;
|
||||
}
|
||||
|
||||
if(descriptor != ((HFSPlusExtentDescriptor*) forkData->extents)) {
|
||||
free(descriptor);
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
int writeExtents(RawFile* rawFile) {
|
||||
Extent* extent;
|
||||
int currentExtent;
|
||||
HFSPlusExtentKey extentKey;
|
||||
HFSPlusExtentDescriptor descriptor[8];
|
||||
HFSPlusForkData* forkData;
|
||||
|
||||
removeExtents(rawFile);
|
||||
|
||||
forkData = rawFile->forkData;
|
||||
currentExtent = 0;
|
||||
extent = rawFile->extents;
|
||||
|
||||
memset(forkData->extents, 0, sizeof(HFSPlusExtentRecord));
|
||||
while(extent != NULL && currentExtent < 8) {
|
||||
((HFSPlusExtentDescriptor*)forkData->extents)[currentExtent].startBlock = extent->startBlock;
|
||||
((HFSPlusExtentDescriptor*)forkData->extents)[currentExtent].blockCount = extent->blockCount;
|
||||
extent = extent->next;
|
||||
currentExtent++;
|
||||
}
|
||||
|
||||
if(extent != NULL) {
|
||||
extentKey.keyLength = sizeof(HFSPlusExtentKey) - sizeof(extentKey.keyLength);
|
||||
extentKey.forkType = 0;
|
||||
extentKey.fileID = rawFile->id;
|
||||
|
||||
currentExtent = 0;
|
||||
|
||||
while(extent != NULL) {
|
||||
if(currentExtent == 0) {
|
||||
memset(descriptor, 0, sizeof(HFSPlusExtentRecord));
|
||||
}
|
||||
|
||||
if(currentExtent == 8) {
|
||||
extentKey.startBlock = descriptor[0].startBlock;
|
||||
addToBTree(rawFile->volume->extentsTree, (BTKey*)(&extentKey), sizeof(HFSPlusExtentRecord), (unsigned char *)(&(descriptor[0])));
|
||||
currentExtent = 0;
|
||||
}
|
||||
|
||||
descriptor[currentExtent].startBlock = extent->startBlock;
|
||||
descriptor[currentExtent].blockCount = extent->blockCount;
|
||||
|
||||
currentExtent++;
|
||||
extent = extent->next;
|
||||
}
|
||||
|
||||
extentKey.startBlock = descriptor[0].startBlock;
|
||||
addToBTree(rawFile->volume->extentsTree, (BTKey*)(&extentKey), sizeof(HFSPlusExtentRecord), (unsigned char *)(&(descriptor[0])));
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
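/* Note: writeExtents() keeps the first eight extents inline in the fork data and
   spills any remaining runs into the extents overflow B-tree, eight descriptors
   per record, keyed by file ID and starting block. */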
|
||||
int readExtents(RawFile* rawFile) {
|
||||
uint32_t blocksLeft;
|
||||
HFSPlusForkData* forkData;
|
||||
uint32_t currentBlock;
|
||||
|
||||
Extent* extent;
|
||||
Extent* lastExtent;
|
||||
|
||||
HFSPlusExtentDescriptor* descriptor;
|
||||
int currentExtent;
|
||||
HFSPlusExtentKey extentKey;
|
||||
int exact;
|
||||
|
||||
extentKey.keyLength = sizeof(HFSPlusExtentKey) - sizeof(extentKey.keyLength);
|
||||
extentKey.forkType = 0;
|
||||
extentKey.fileID = rawFile->id;
|
||||
|
||||
forkData = rawFile->forkData;
|
||||
blocksLeft = forkData->totalBlocks;
|
||||
currentExtent = 0;
|
||||
currentBlock = 0;
|
||||
descriptor = (HFSPlusExtentDescriptor*) forkData->extents;
|
||||
|
||||
lastExtent = NULL;
|
||||
|
||||
while(blocksLeft > 0) {
|
||||
extent = (Extent*) malloc(sizeof(Extent));
|
||||
|
||||
if(currentExtent == 8) {
|
||||
if(rawFile->volume->extentsTree == NULL) {
|
||||
hfs_panic("no extents overflow file loaded yet!");
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if(descriptor != ((HFSPlusExtentDescriptor*) forkData->extents)) {
|
||||
free(descriptor);
|
||||
}
|
||||
|
||||
extentKey.startBlock = currentBlock;
|
||||
descriptor = (HFSPlusExtentDescriptor*) search(rawFile->volume->extentsTree, (BTKey*)(&extentKey), &exact, NULL, NULL);
|
||||
if(descriptor == NULL || exact == FALSE) {
|
||||
hfs_panic("inconsistent extents information!");
|
||||
return FALSE;
|
||||
} else {
|
||||
free(extent);	/* not linked into the list on this pass; a fresh node is allocated on the next iteration */
currentExtent = 0;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
extent->startBlock = descriptor[currentExtent].startBlock;
|
||||
extent->blockCount = descriptor[currentExtent].blockCount;
|
||||
extent->next = NULL;
|
||||
|
||||
currentBlock += extent->blockCount;
|
||||
blocksLeft -= extent->blockCount;
|
||||
currentExtent++;
|
||||
|
||||
if(lastExtent == NULL) {
|
||||
rawFile->extents = extent;
|
||||
} else {
|
||||
lastExtent->next = extent;
|
||||
}
|
||||
|
||||
lastExtent = extent;
|
||||
}
|
||||
|
||||
if(descriptor != ((HFSPlusExtentDescriptor*) forkData->extents)) {
|
||||
free(descriptor);
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
io_func* openRawFile(HFSCatalogNodeID id, HFSPlusForkData* forkData, HFSPlusCatalogRecord* catalogRecord, Volume* volume) {
|
||||
io_func* io;
|
||||
RawFile* rawFile;
|
||||
|
||||
io = (io_func*) malloc(sizeof(io_func));
|
||||
rawFile = (RawFile*) malloc(sizeof(RawFile));
|
||||
|
||||
rawFile->id = id;
|
||||
rawFile->volume = volume;
|
||||
rawFile->forkData = forkData;
|
||||
rawFile->catalogRecord = catalogRecord;
|
||||
rawFile->extents = NULL;
|
||||
|
||||
io->data = rawFile;
|
||||
io->read = &rawFileRead;
|
||||
io->write = &rawFileWrite;
|
||||
io->close = &closeRawFile;
|
||||
|
||||
if(!readExtents(rawFile)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return io;
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
void hfs_panic(const char* hfs_panicString) {
|
||||
fprintf(stderr, "%s\n", hfs_panicString);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
void printUnicode(HFSUniStr255* str) {
|
||||
int i;
|
||||
|
||||
for(i = 0; i < str->length; i++) {
|
||||
printf("%c", (char)(str->unicode[i] & 0xff));
|
||||
}
|
||||
}
|
||||
|
||||
char* unicodeToAscii(HFSUniStr255* str) {
|
||||
int i;
|
||||
char* toReturn;
|
||||
|
||||
toReturn = (char*) malloc(sizeof(char) * (str->length + 1));
|
||||
|
||||
for(i = 0; i < str->length; i++) {
|
||||
toReturn[i] = (char)(str->unicode[i] & 0xff);
|
||||
}
|
||||
toReturn[i] = '\0';
|
||||
|
||||
return toReturn;
|
||||
}
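
/* Note: printUnicode() and unicodeToAscii() above keep only the low byte of each
   UTF-16 unit, so non-ASCII characters in HFS+ names are mangled. */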
|
176
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/volume.c
Normal file
@ -0,0 +1,176 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
void flipForkData(HFSPlusForkData* forkData) {
|
||||
FLIPENDIAN(forkData->logicalSize);
|
||||
FLIPENDIAN(forkData->clumpSize);
|
||||
FLIPENDIAN(forkData->totalBlocks);
|
||||
|
||||
flipExtentRecord(&forkData->extents);
|
||||
}
|
||||
|
||||
static HFSPlusVolumeHeader* readVolumeHeader(io_func* io, off_t offset) {
|
||||
HFSPlusVolumeHeader* volumeHeader;
|
||||
|
||||
volumeHeader = (HFSPlusVolumeHeader*) malloc(sizeof(HFSPlusVolumeHeader));
|
||||
|
||||
if(!(READ(io, offset, sizeof(HFSPlusVolumeHeader), volumeHeader)))
|
||||
return NULL;
|
||||
|
||||
FLIPENDIAN(volumeHeader->signature);
|
||||
FLIPENDIAN(volumeHeader->version);
|
||||
FLIPENDIAN(volumeHeader->attributes);
|
||||
FLIPENDIAN(volumeHeader->lastMountedVersion);
|
||||
FLIPENDIAN(volumeHeader->journalInfoBlock);
|
||||
FLIPENDIAN(volumeHeader->createDate);
|
||||
FLIPENDIAN(volumeHeader->modifyDate);
|
||||
FLIPENDIAN(volumeHeader->backupDate);
|
||||
FLIPENDIAN(volumeHeader->checkedDate);
|
||||
FLIPENDIAN(volumeHeader->fileCount);
|
||||
FLIPENDIAN(volumeHeader->folderCount);
|
||||
FLIPENDIAN(volumeHeader->blockSize);
|
||||
FLIPENDIAN(volumeHeader->totalBlocks);
|
||||
FLIPENDIAN(volumeHeader->freeBlocks);
|
||||
FLIPENDIAN(volumeHeader->nextAllocation);
|
||||
FLIPENDIAN(volumeHeader->rsrcClumpSize);
|
||||
FLIPENDIAN(volumeHeader->dataClumpSize);
|
||||
FLIPENDIAN(volumeHeader->nextCatalogID);
|
||||
FLIPENDIAN(volumeHeader->writeCount);
|
||||
FLIPENDIAN(volumeHeader->encodingsBitmap);
|
||||
|
||||
|
||||
flipForkData(&volumeHeader->allocationFile);
|
||||
flipForkData(&volumeHeader->extentsFile);
|
||||
flipForkData(&volumeHeader->catalogFile);
|
||||
flipForkData(&volumeHeader->attributesFile);
|
||||
flipForkData(&volumeHeader->startupFile);
|
||||
|
||||
return volumeHeader;
|
||||
}
|
||||
|
||||
static int writeVolumeHeader(io_func* io, HFSPlusVolumeHeader* volumeHeaderToWrite, off_t offset) {
|
||||
HFSPlusVolumeHeader* volumeHeader;
|
||||
|
||||
volumeHeader = (HFSPlusVolumeHeader*) malloc(sizeof(HFSPlusVolumeHeader));
|
||||
memcpy(volumeHeader, volumeHeaderToWrite, sizeof(HFSPlusVolumeHeader));
|
||||
|
||||
FLIPENDIAN(volumeHeader->signature);
|
||||
FLIPENDIAN(volumeHeader->version);
|
||||
FLIPENDIAN(volumeHeader->attributes);
|
||||
FLIPENDIAN(volumeHeader->lastMountedVersion);
|
||||
FLIPENDIAN(volumeHeader->journalInfoBlock);
|
||||
FLIPENDIAN(volumeHeader->createDate);
|
||||
FLIPENDIAN(volumeHeader->modifyDate);
|
||||
FLIPENDIAN(volumeHeader->backupDate);
|
||||
FLIPENDIAN(volumeHeader->checkedDate);
|
||||
FLIPENDIAN(volumeHeader->fileCount);
|
||||
FLIPENDIAN(volumeHeader->folderCount);
|
||||
FLIPENDIAN(volumeHeader->blockSize);
|
||||
FLIPENDIAN(volumeHeader->totalBlocks);
|
||||
FLIPENDIAN(volumeHeader->freeBlocks);
|
||||
FLIPENDIAN(volumeHeader->nextAllocation);
|
||||
FLIPENDIAN(volumeHeader->rsrcClumpSize);
|
||||
FLIPENDIAN(volumeHeader->dataClumpSize);
|
||||
FLIPENDIAN(volumeHeader->nextCatalogID);
|
||||
FLIPENDIAN(volumeHeader->writeCount);
|
||||
FLIPENDIAN(volumeHeader->encodingsBitmap);
|
||||
|
||||
|
||||
flipForkData(&volumeHeader->allocationFile);
|
||||
flipForkData(&volumeHeader->extentsFile);
|
||||
flipForkData(&volumeHeader->catalogFile);
|
||||
flipForkData(&volumeHeader->attributesFile);
|
||||
flipForkData(&volumeHeader->startupFile);
|
||||
|
||||
if(!(WRITE(io, offset, sizeof(HFSPlusVolumeHeader), volumeHeader)))
|
||||
return FALSE;
|
||||
|
||||
free(volumeHeader);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
int updateVolume(Volume* volume) {
|
||||
ASSERT(writeVolumeHeader(volume->image, volume->volumeHeader,
|
||||
((off_t)volume->volumeHeader->totalBlocks * (off_t)volume->volumeHeader->blockSize) - 1024), "writeVolumeHeader");
|
||||
return writeVolumeHeader(volume->image, volume->volumeHeader, 1024);
|
||||
}
|
||||
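/* Note: updateVolume() writes the header both at offset 1024 and 1024 bytes before
   the end of the volume, matching HFS+'s primary and alternate volume header
   locations. */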
|
||||
Volume* openVolume(io_func* io) {
|
||||
Volume* volume;
|
||||
io_func* file;
|
||||
|
||||
volume = (Volume*) malloc(sizeof(Volume));
|
||||
volume->image = io;
|
||||
volume->extentsTree = NULL;
|
||||
|
||||
volume->volumeHeader = readVolumeHeader(io, 1024);
|
||||
if(volume->volumeHeader == NULL) {
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
file = openRawFile(kHFSExtentsFileID, &volume->volumeHeader->extentsFile, NULL, volume);
|
||||
if(file == NULL) {
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->extentsTree = openExtentsTree(file);
|
||||
if(volume->extentsTree == NULL) {
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
file = openRawFile(kHFSCatalogFileID, &volume->volumeHeader->catalogFile, NULL, volume);
|
||||
if(file == NULL) {
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->catalogTree = openCatalogTree(file);
|
||||
if(volume->catalogTree == NULL) {
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->allocationFile = openRawFile(kHFSAllocationFileID, &volume->volumeHeader->allocationFile, NULL, volume);
|
||||
if(volume->allocationFile == NULL) {
|
||||
closeBTree(volume->catalogTree);
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->attrTree = NULL;
|
||||
file = openRawFile(kHFSAttributesFileID, &volume->volumeHeader->attributesFile, NULL, volume);
|
||||
if(file != NULL) {
|
||||
volume->attrTree = openAttributesTree(file);
|
||||
if(!volume->attrTree) {
|
||||
CLOSE(file);
|
||||
}
|
||||
}
|
||||
|
||||
volume->metadataDir = getMetadataDirectoryID(volume);
|
||||
|
||||
return volume;
|
||||
}
|
||||
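/* Usage sketch (illustrative only, not part of this file): wrap an HFS+ image in
   an io_func and open it, e.g.

       io_func* io = IOFuncFromAbstractFile(createAbstractFileFromFile(fopen(path, "rb+")));
       Volume* volume = openVolume(io);
       ...
       closeVolume(volume);
       CLOSE(io);

   createAbstractFileFromFile() and IOFuncFromAbstractFile() are declared in
   abstractfile.h; closeVolume() does not close the underlying image, so the
   caller closes io separately. */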
|
||||
void closeVolume(Volume *volume) {
|
||||
if(volume->attrTree)
|
||||
closeBTree(volume->attrTree);
|
||||
|
||||
CLOSE(volume->allocationFile);
|
||||
closeBTree(volume->catalogTree);
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
}
|
374
dump-imessages/iphone-dataprotection/emf_decrypter/hfs/xattr.c
Normal file
@ -0,0 +1,374 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
static inline void flipAttrData(HFSPlusAttrData* data) {
|
||||
FLIPENDIAN(data->recordType);
|
||||
FLIPENDIAN(data->size);
|
||||
}
|
||||
|
||||
static inline void flipAttrForkData(HFSPlusAttrForkData* data) {
|
||||
FLIPENDIAN(data->recordType);
|
||||
flipForkData(&data->theFork);
|
||||
}
|
||||
|
||||
static inline void flipAttrExtents(HFSPlusAttrExtents* data) {
|
||||
FLIPENDIAN(data->recordType);
|
||||
flipExtentRecord(&data->extents);
|
||||
}
|
||||
|
||||
static int attrCompare(BTKey* vLeft, BTKey* vRight) {
|
||||
HFSPlusAttrKey* left;
|
||||
HFSPlusAttrKey* right;
|
||||
uint16_t i;
|
||||
|
||||
uint16_t cLeft;
|
||||
uint16_t cRight;
|
||||
|
||||
left = (HFSPlusAttrKey*) vLeft;
|
||||
right =(HFSPlusAttrKey*) vRight;
|
||||
|
||||
if(left->fileID < right->fileID) {
|
||||
return -1;
|
||||
} else if(left->fileID > right->fileID) {
|
||||
return 1;
|
||||
} else {
|
||||
for(i = 0; i < left->name.length; i++) {
|
||||
if(i >= right->name.length) {
|
||||
return 1;
|
||||
} else {
|
||||
cLeft = left->name.unicode[i];
|
||||
cRight = right->name.unicode[i];
|
||||
|
||||
if(cLeft < cRight)
|
||||
return -1;
|
||||
else if(cLeft > cRight)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if(i < right->name.length) {
|
||||
return -1;
|
||||
} else {
|
||||
/* do a safety check on key length. Otherwise, bad things may happen later on when we try to add or remove with this key */
|
||||
/*if(left->keyLength == right->keyLength) {
|
||||
return 0;
|
||||
} else if(left->keyLength < right->keyLength) {
|
||||
return -1;
|
||||
} else {
|
||||
return 1;
|
||||
}*/
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#define UNICODE_START (sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint16_t))
|
||||
|
||||
static BTKey* attrKeyRead(off_t offset, io_func* io) {
|
||||
int i;
|
||||
HFSPlusAttrKey* key;
|
||||
|
||||
key = (HFSPlusAttrKey*) malloc(sizeof(HFSPlusAttrKey));
|
||||
|
||||
if(!READ(io, offset, UNICODE_START, key))
|
||||
return NULL;
|
||||
|
||||
FLIPENDIAN(key->keyLength);
|
||||
FLIPENDIAN(key->fileID);
|
||||
FLIPENDIAN(key->startBlock);
|
||||
FLIPENDIAN(key->name.length);
|
||||
|
||||
if(key->name.length > 254)
|
||||
{
|
||||
printf("Invalid xattr key at offset %x\n", offset);
|
||||
free(key);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if(!READ(io, offset + UNICODE_START, key->name.length * sizeof(uint16_t), ((unsigned char *)key) + UNICODE_START))
|
||||
return NULL;
|
||||
|
||||
for(i = 0; i < key->name.length; i++) {
|
||||
FLIPENDIAN(key->name.unicode[i]);
|
||||
}
|
||||
|
||||
return (BTKey*)key;
|
||||
}
|
||||
|
||||
static int attrKeyWrite(off_t offset, BTKey* toWrite, io_func* io) {
|
||||
HFSPlusAttrKey* key;
|
||||
uint16_t keyLength;
|
||||
uint16_t nodeNameLength;
|
||||
int i;
|
||||
|
||||
keyLength = toWrite->keyLength;
|
||||
key = (HFSPlusAttrKey*) malloc(keyLength);
|
||||
memcpy(key, toWrite, keyLength);
|
||||
|
||||
nodeNameLength = key->name.length;
|
||||
|
||||
FLIPENDIAN(key->keyLength);
|
||||
FLIPENDIAN(key->fileID);
|
||||
FLIPENDIAN(key->startBlock);
|
||||
FLIPENDIAN(key->name.length);
|
||||
|
||||
for(i = 0; i < nodeNameLength; i++) {
|
||||
FLIPENDIAN(key->name.unicode[i]);
|
||||
}
|
||||
|
||||
if(!WRITE(io, offset, keyLength, key))
|
||||
return FALSE;
|
||||
|
||||
free(key);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void attrKeyPrint(BTKey* toPrint) {
|
||||
HFSPlusAttrKey* key;
|
||||
|
||||
key = (HFSPlusAttrKey*)toPrint;
|
||||
|
||||
printf("attribute%d:%d:", key->fileID, key->startBlock);
|
||||
printUnicode(&key->name);
|
||||
}
|
||||
|
||||
static BTKey* attrDataRead(off_t offset, io_func* io) {
|
||||
HFSPlusAttrRecord* record;
|
||||
|
||||
record = (HFSPlusAttrRecord*) malloc(sizeof(HFSPlusAttrRecord));
|
||||
|
||||
if(!READ(io, offset, sizeof(uint32_t), record))
|
||||
return NULL;
|
||||
|
||||
FLIPENDIAN(record->recordType);
|
||||
switch(record->recordType)
|
||||
{
|
||||
case kHFSPlusAttrInlineData:
|
||||
if(!READ(io, offset, sizeof(HFSPlusAttrData), record))
|
||||
return NULL;
|
||||
|
||||
flipAttrData((HFSPlusAttrData*) record);
|
||||
|
||||
record = realloc(record, sizeof(HFSPlusAttrData) + ((HFSPlusAttrData*) record)->size);
|
||||
if(!READ(io, offset + sizeof(HFSPlusAttrData), ((HFSPlusAttrData*) record)->size, ((HFSPlusAttrData*) record)->data))
|
||||
return NULL;
|
||||
|
||||
break;
|
||||
|
||||
case kHFSPlusAttrForkData:
|
||||
if(!READ(io, offset, sizeof(HFSPlusAttrForkData), record))
|
||||
return NULL;
|
||||
|
||||
flipAttrForkData((HFSPlusAttrForkData*) record);
|
||||
|
||||
break;
|
||||
|
||||
case kHFSPlusAttrExtents:
|
||||
if(!READ(io, offset, sizeof(HFSPlusAttrExtents), record))
|
||||
return NULL;
|
||||
|
||||
flipAttrExtents((HFSPlusAttrExtents*) record);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return (BTKey*)record;
|
||||
}
|
||||
|
||||
static int updateAttributes(Volume* volume, HFSPlusAttrKey* skey, HFSPlusAttrRecord* srecord) {
|
||||
HFSPlusAttrKey key;
|
||||
HFSPlusAttrRecord* record;
|
||||
int ret = FALSE, len;	/* FALSE if the record type is not recognized below */
|
||||
|
||||
memcpy(&key, skey, skey->keyLength);
|
||||
|
||||
switch(srecord->recordType) {
|
||||
case kHFSPlusAttrInlineData:
|
||||
len = srecord->attrData.size + sizeof(HFSPlusAttrData);
|
||||
record = (HFSPlusAttrRecord*) malloc(len);
|
||||
memcpy(record, srecord, len);
|
||||
flipAttrData((HFSPlusAttrData*) record);
|
||||
removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
ret = addToBTree(volume->attrTree, (BTKey*)(&key), len, (unsigned char *)record);
|
||||
free(record);
|
||||
break;
|
||||
case kHFSPlusAttrForkData:
|
||||
record = (HFSPlusAttrRecord*) malloc(sizeof(HFSPlusAttrForkData));
|
||||
memcpy(record, srecord, sizeof(HFSPlusAttrForkData));
|
||||
flipAttrForkData((HFSPlusAttrForkData*) record);
|
||||
removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
ret = addToBTree(volume->attrTree, (BTKey*)(&key), sizeof(HFSPlusAttrForkData), (unsigned char *)record);
|
||||
free(record);
|
||||
break;
|
||||
case kHFSPlusAttrExtents:
|
||||
record = (HFSPlusAttrRecord*) malloc(sizeof(HFSPlusAttrExtents));
|
||||
memcpy(record, srecord, sizeof(HFSPlusAttrExtents));
|
||||
flipAttrExtents((HFSPlusAttrExtents*) record);
|
||||
removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
ret = addToBTree(volume->attrTree, (BTKey*)(&key), sizeof(HFSPlusAttrExtents), (unsigned char *)record);
|
||||
free(record);
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t getAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t** data) {
|
||||
HFSPlusAttrKey key;
|
||||
HFSPlusAttrRecord* record;
|
||||
size_t size;
|
||||
int exact;
|
||||
|
||||
if(!volume->attrTree)
|
||||
return FALSE;
|
||||
|
||||
memset(&key, 0 , sizeof(HFSPlusAttrKey));
|
||||
key.fileID = fileID;
|
||||
key.startBlock = 0;
|
||||
ASCIIToUnicode(name, &key.name);
|
||||
key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);
|
||||
|
||||
*data = NULL;
|
||||
|
||||
record = (HFSPlusAttrRecord*) search(volume->attrTree, (BTKey*)(&key), &exact, NULL, NULL);
|
||||
|
||||
if(exact == FALSE) {
|
||||
if(record)
|
||||
free(record);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch(record->recordType)
|
||||
{
|
||||
case kHFSPlusAttrInlineData:
|
||||
size = record->attrData.size;
|
||||
*data = (uint8_t*) malloc(size);
|
||||
memcpy(*data, record->attrData.data, size);
|
||||
free(record);
|
||||
return size;
|
||||
default:
|
||||
fprintf(stderr, "unsupported attribute node format\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
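/* Usage sketch (illustrative only): getAttribute() above returns the size of an
   inline extended attribute and allocates a buffer the caller must free, e.g.

       uint8_t* data;
       size_t len = getAttribute(volume, cnid, "com.apple.system.cprotect", &data);
       if(len > 0) {
           ... use data ...
           free(data);
       }

   The CNID and attribute name here are examples, not requirements of the API. */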
|
||||
int setAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t* data, size_t size) {
|
||||
HFSPlusAttrKey key;
|
||||
HFSPlusAttrData* record;
|
||||
int ret, exact;
|
||||
|
||||
if(!volume->attrTree)
|
||||
return FALSE;
|
||||
|
||||
memset(&key, 0 , sizeof(HFSPlusAttrKey));
|
||||
key.fileID = fileID;
|
||||
key.startBlock = 0;
|
||||
ASCIIToUnicode(name, &key.name);
|
||||
key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);
|
||||
|
||||
record = (HFSPlusAttrData*) malloc(sizeof(HFSPlusAttrData) + size);
|
||||
memset(record, 0, sizeof(HFSPlusAttrData));
|
||||
|
||||
record->recordType = kHFSPlusAttrInlineData;
|
||||
record->size = size;
|
||||
memcpy(record->data, data, size);
|
||||
|
||||
ret = updateAttributes(volume, &key, (HFSPlusAttrRecord*) record);
|
||||
|
||||
free(record);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int unsetAttribute(Volume* volume, uint32_t fileID, const char* name) {
|
||||
HFSPlusAttrKey key;
|
||||
|
||||
if(!volume->attrTree)
|
||||
return FALSE;
|
||||
|
||||
memset(&key, 0 , sizeof(HFSPlusAttrKey));
|
||||
key.fileID = fileID;
|
||||
key.startBlock = 0;
|
||||
ASCIIToUnicode(name, &key.name);
|
||||
key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);
|
||||
return removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
}
|
||||
|
||||
XAttrList* getAllExtendedAttributes(HFSCatalogNodeID CNID, Volume* volume) {
|
||||
BTree* tree;
|
||||
HFSPlusAttrKey key;
|
||||
HFSPlusAttrRecord* record;
|
||||
uint32_t nodeNumber;
|
||||
int recordNumber;
|
||||
BTNodeDescriptor* descriptor;
|
||||
HFSPlusAttrKey* currentKey;
|
||||
off_t recordOffset;
|
||||
XAttrList* list = NULL;
|
||||
XAttrList* lastItem = NULL;
|
||||
XAttrList* item = NULL;
|
||||
|
||||
if(!volume->attrTree)
|
||||
return NULL;
|
||||
|
||||
memset(&key, 0 , sizeof(HFSPlusAttrKey));
|
||||
key.fileID = CNID;
|
||||
key.startBlock = 0;
|
||||
key.name.length = 0;
|
||||
key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);
|
||||
|
||||
tree = volume->attrTree;
|
||||
record = (HFSPlusAttrRecord*) search(tree, (BTKey*)(&key), NULL, &nodeNumber, &recordNumber);
|
||||
if(record == NULL)
|
||||
return NULL;
|
||||
|
||||
free(record);
|
||||
|
||||
while(nodeNumber != 0) {
|
||||
descriptor = readBTNodeDescriptor(nodeNumber, tree);
|
||||
|
||||
while(recordNumber < descriptor->numRecords) {
|
||||
recordOffset = getRecordOffset(recordNumber, nodeNumber, tree);
|
||||
currentKey = (HFSPlusAttrKey*) READ_KEY(tree, recordOffset, tree->io);
|
||||
|
||||
if(currentKey->fileID == CNID) {
|
||||
item = (XAttrList*) malloc(sizeof(XAttrList));
|
||||
item->name = (char*) malloc(currentKey->name.length + 1);
|
||||
int i;
|
||||
for(i = 0; i < currentKey->name.length; i++) {
|
||||
item->name[i] = currentKey->name.unicode[i];
|
||||
}
|
||||
item->name[currentKey->name.length] = '\0';
|
||||
item->next = NULL;
|
||||
|
||||
if(lastItem != NULL) {
|
||||
lastItem->next = item;
|
||||
} else {
|
||||
list = item;
|
||||
}
|
||||
|
||||
lastItem = item;
|
||||
|
||||
free(currentKey);
|
||||
} else {
|
||||
free(currentKey);
|
||||
free(descriptor);
|
||||
return list;
|
||||
}
|
||||
|
||||
recordNumber++;
|
||||
}
|
||||
|
||||
nodeNumber = descriptor->fLink;
|
||||
recordNumber = 0;
|
||||
|
||||
free(descriptor);
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
BTree* openAttributesTree(io_func* file) {
|
||||
return openBTree(file, &attrCompare, &attrKeyRead, &attrKeyWrite, &attrKeyPrint, &attrDataRead);
|
||||
}
|
||||
|
@ -0,0 +1,75 @@
|
||||
#ifndef ABSTRACTFILE_H
|
||||
#define ABSTRACTFILE_H
|
||||
|
||||
#include "common.h"
|
||||
#include <stdint.h>
|
||||
|
||||
typedef struct AbstractFile AbstractFile;
|
||||
typedef struct AbstractFile2 AbstractFile2;
|
||||
|
||||
typedef size_t (*WriteFunc)(AbstractFile* file, const void* data, size_t len);
|
||||
typedef size_t (*ReadFunc)(AbstractFile* file, void* data, size_t len);
|
||||
typedef int (*SeekFunc)(AbstractFile* file, off_t offset);
|
||||
typedef off_t (*TellFunc)(AbstractFile* file);
|
||||
typedef void (*CloseFunc)(AbstractFile* file);
|
||||
typedef off_t (*GetLengthFunc)(AbstractFile* file);
|
||||
typedef void (*SetKeyFunc)(AbstractFile2* file, const unsigned int* key, const unsigned int* iv);
|
||||
|
||||
typedef enum AbstractFileType {
|
||||
AbstractFileTypeFile,
|
||||
AbstractFileType8900,
|
||||
AbstractFileTypeImg2,
|
||||
AbstractFileTypeImg3,
|
||||
AbstractFileTypeLZSS,
|
||||
AbstractFileTypeIBootIM,
|
||||
AbstractFileTypeMem,
|
||||
AbstractFileTypeMemFile,
|
||||
AbstractFileTypeDummy
|
||||
} AbstractFileType;
|
||||
|
||||
struct AbstractFile {
|
||||
void* data;
|
||||
WriteFunc write;
|
||||
ReadFunc read;
|
||||
SeekFunc seek;
|
||||
TellFunc tell;
|
||||
GetLengthFunc getLength;
|
||||
CloseFunc close;
|
||||
AbstractFileType type;
|
||||
};
|
||||
|
||||
struct AbstractFile2 {
|
||||
AbstractFile super;
|
||||
SetKeyFunc setKey;
|
||||
};
|
||||
|
||||
|
||||
typedef struct {
|
||||
size_t offset;
|
||||
void** buffer;
|
||||
size_t bufferSize;
|
||||
} MemWrapperInfo;
|
||||
|
||||
typedef struct {
|
||||
size_t offset;
|
||||
void** buffer;
|
||||
size_t* bufferSize;
|
||||
size_t actualBufferSize;
|
||||
} MemFileWrapperInfo;
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
AbstractFile* createAbstractFileFromFile(FILE* file);
|
||||
AbstractFile* createAbstractFileFromDummy();
|
||||
AbstractFile* createAbstractFileFromMemory(void** buffer, size_t size);
|
||||
AbstractFile* createAbstractFileFromMemoryFile(void** buffer, size_t* size);
|
||||
AbstractFile* createAbstractFileFromMemoryFileBuffer(void** buffer, size_t* size, size_t actualBufferSize);
|
||||
void abstractFilePrint(AbstractFile* file, const char* format, ...);
|
||||
io_func* IOFuncFromAbstractFile(AbstractFile* file);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -0,0 +1,108 @@
|
||||
#ifndef COMMON_H
|
||||
#define COMMON_H
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#ifdef WIN32
|
||||
#define fseeko fseeko64
|
||||
#define ftello ftello64
|
||||
#define off_t off64_t
|
||||
#define mkdir(x, y) mkdir(x)
|
||||
#define PATH_SEPARATOR "\\"
|
||||
#else
|
||||
#define PATH_SEPARATOR "/"
|
||||
#endif
|
||||
|
||||
#define TRUE 1
|
||||
#define FALSE 0
|
||||
|
||||
#define FLIPENDIAN(x) flipEndian((unsigned char *)(&(x)), sizeof(x))
|
||||
#define FLIPENDIANLE(x) flipEndianLE((unsigned char *)(&(x)), sizeof(x))
|
||||
|
||||
#define IS_BIG_ENDIAN 0
|
||||
#define IS_LITTLE_ENDIAN 1
|
||||
|
||||
#define TIME_OFFSET_FROM_UNIX 2082844800L
|
||||
#define APPLE_TO_UNIX_TIME(x) ((x) - TIME_OFFSET_FROM_UNIX)
|
||||
#define UNIX_TO_APPLE_TIME(x) ((x) + TIME_OFFSET_FROM_UNIX)
|
||||
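/* The 2082844800-second offset is the difference between the HFS+ epoch
   (midnight GMT, January 1, 1904) and the Unix epoch (January 1, 1970), so e.g.
   APPLE_TO_UNIX_TIME(volumeHeader->createDate) yields a Unix timestamp. */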
|
||||
#define ASSERT(x, m) if(!(x)) { fflush(stdout); fprintf(stderr, "error: %s\n", m); perror("error"); fflush(stderr); exit(1); }
|
||||
|
||||
extern char endianness;
|
||||
|
||||
static inline void flipEndian(unsigned char* x, int length) {
|
||||
int i;
|
||||
unsigned char tmp;
|
||||
|
||||
if(endianness == IS_BIG_ENDIAN) {
|
||||
return;
|
||||
} else {
|
||||
for(i = 0; i < (length / 2); i++) {
|
||||
tmp = x[i];
|
||||
x[i] = x[length - i - 1];
|
||||
x[length - i - 1] = tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline void flipEndianLE(unsigned char* x, int length) {
|
||||
int i;
|
||||
unsigned char tmp;
|
||||
|
||||
if(endianness == IS_LITTLE_ENDIAN) {
|
||||
return;
|
||||
} else {
|
||||
for(i = 0; i < (length / 2); i++) {
|
||||
tmp = x[i];
|
||||
x[i] = x[length - i - 1];
|
||||
x[length - i - 1] = tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
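/* flipEndian() converts big-endian on-disk values to host order (a no-op on
   big-endian hosts); flipEndianLE() is the analogue for little-endian values. */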
|
||||
static inline void hexToBytes(const char* hex, uint8_t** buffer, size_t* bytes) {
|
||||
*bytes = strlen(hex) / 2;
|
||||
*buffer = (uint8_t*) malloc(*bytes);
|
||||
size_t i;
|
||||
for(i = 0; i < *bytes; i++) {
|
||||
uint32_t byte;
|
||||
sscanf(hex, "%2x", &byte);
|
||||
(*buffer)[i] = byte;
|
||||
hex += 2;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void hexToInts(const char* hex, unsigned int** buffer, size_t* bytes) {
|
||||
*bytes = strlen(hex) / 2;
|
||||
*buffer = (unsigned int*) malloc((*bytes) * sizeof(int));
|
||||
size_t i;
|
||||
for(i = 0; i < *bytes; i++) {
|
||||
sscanf(hex, "%2x", &((*buffer)[i]));
|
||||
hex += 2;
|
||||
}
|
||||
}
|
||||
|
||||
struct io_func_struct;
|
||||
|
||||
typedef int (*readFunc)(struct io_func_struct* io, off_t location, size_t size, void *buffer);
|
||||
typedef int (*writeFunc)(struct io_func_struct* io, off_t location, size_t size, void *buffer);
|
||||
typedef void (*closeFunc)(struct io_func_struct* io);
|
||||
|
||||
typedef struct io_func_struct {
|
||||
void* data;
|
||||
readFunc read;
|
||||
writeFunc write;
|
||||
closeFunc close;
|
||||
} io_func;
|
||||
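/* io_func is the I/O abstraction the rest of the HFS code reads and writes
   through (see the READ/WRITE/CLOSE macros in hfsplus.h); openRawFile() and
   IOFuncFromAbstractFile() both return one. */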
|
||||
struct AbstractFile;
|
||||
|
||||
unsigned char* decodeBase64(char* toDecode, size_t* dataLength);
|
||||
void writeBase64(struct AbstractFile* file, unsigned char* data, size_t dataLength, int tabLength, int width);
|
||||
char* convertBase64(unsigned char* data, size_t dataLength, int tabLength, int width);
|
||||
|
||||
#endif
|
@ -0,0 +1,342 @@
|
||||
#ifndef DMG_H
|
||||
#define DMG_H
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include <hfs/hfsplus.h>
|
||||
#include "abstractfile.h"
|
||||
|
||||
#define CHECKSUM_CRC32 0x00000002
|
||||
#define CHECKSUM_MKBLOCK 0x0002
|
||||
#define CHECKSUM_NONE 0x0000
|
||||
|
||||
#define BLOCK_ZLIB 0x80000005
|
||||
#define BLOCK_RAW 0x00000001
|
||||
#define BLOCK_IGNORE 0x00000002
|
||||
#define BLOCK_COMMENT 0x7FFFFFFE
|
||||
#define BLOCK_TERMINATOR 0xFFFFFFFF
|
||||
|
||||
#define SECTOR_SIZE 512
|
||||
|
||||
#define DRIVER_DESCRIPTOR_SIGNATURE 0x4552
|
||||
#define APPLE_PARTITION_MAP_SIGNATURE 0x504D
|
||||
#define UDIF_BLOCK_SIGNATURE 0x6D697368
|
||||
#define KOLY_SIGNATURE 0x6B6F6C79
|
||||
#define HFSX_SIGNATURE 0x4858
|
||||
|
||||
#define ATTRIBUTE_HDIUTIL 0x0050
|
||||
|
||||
#define HFSX_VOLUME_TYPE "Apple_HFSX"
|
||||
|
||||
#define DDM_SIZE 0x1
|
||||
#define PARTITION_SIZE 0x3f
|
||||
#define ATAPI_SIZE 0x8
|
||||
#define FREE_SIZE 0xa
|
||||
#define EXTRA_SIZE (ATAPI_OFFSET + ATAPI_SIZE + FREE_SIZE)
|
||||
|
||||
#define DDM_OFFSET 0x0
|
||||
#define PARTITION_OFFSET (DDM_SIZE)
|
||||
#define ATAPI_OFFSET 64
|
||||
#define USER_OFFSET (ATAPI_OFFSET + ATAPI_SIZE)
|
||||
|
||||
#define BOOTCODE_DMMY 0x444D4D59
|
||||
#define BOOTCODE_GOON 0x676F6F6E
|
||||
|
||||
enum {
|
||||
kUDIFFlagsFlattened = 1
|
||||
};
|
||||
|
||||
enum {
|
||||
kUDIFDeviceImageType = 1,
|
||||
kUDIFPartitionImageType = 2
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
uint32_t type;
|
||||
uint32_t size;
|
||||
uint32_t data[0x20];
|
||||
} __attribute__((__packed__)) UDIFChecksum;
|
||||
|
||||
typedef struct {
|
||||
uint32_t data1; /* smallest */
|
||||
uint32_t data2;
|
||||
uint32_t data3;
|
||||
uint32_t data4; /* largest */
|
||||
} __attribute__((__packed__)) UDIFID;
|
||||
|
||||
typedef struct {
|
||||
uint32_t fUDIFSignature;
|
||||
uint32_t fUDIFVersion;
|
||||
uint32_t fUDIFHeaderSize;
|
||||
uint32_t fUDIFFlags;
|
||||
|
||||
uint64_t fUDIFRunningDataForkOffset;
|
||||
uint64_t fUDIFDataForkOffset;
|
||||
uint64_t fUDIFDataForkLength;
|
||||
uint64_t fUDIFRsrcForkOffset;
|
||||
uint64_t fUDIFRsrcForkLength;
|
||||
|
||||
uint32_t fUDIFSegmentNumber;
|
||||
uint32_t fUDIFSegmentCount;
|
||||
UDIFID fUDIFSegmentID; /* a 128-bit number like a GUID, but does not seem to be an OSF GUID, since it doesn't have the proper versioning byte */
|
||||
|
||||
UDIFChecksum fUDIFDataForkChecksum;
|
||||
|
||||
uint64_t fUDIFXMLOffset;
|
||||
uint64_t fUDIFXMLLength;
|
||||
|
||||
uint8_t reserved1[0x78]; /* this is actually the perfect amount of space to store everything in this struct until the checksum */
|
||||
|
||||
UDIFChecksum fUDIFMasterChecksum;
|
||||
|
||||
uint32_t fUDIFImageVariant;
|
||||
uint64_t fUDIFSectorCount;
|
||||
|
||||
uint32_t reserved2;
|
||||
uint32_t reserved3;
|
||||
uint32_t reserved4;
|
||||
|
||||
} __attribute__((__packed__)) UDIFResourceFile;
|
||||
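/* UDIFResourceFile appears to be the 512-byte trailer at the end of a UDIF (.dmg)
   image, identified by KOLY_SIGNATURE above. */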
|
||||
typedef struct {
|
||||
uint32_t type;
|
||||
uint32_t reserved;
|
||||
uint64_t sectorStart;
|
||||
uint64_t sectorCount;
|
||||
uint64_t compOffset;
|
||||
uint64_t compLength;
|
||||
} __attribute__((__packed__)) BLKXRun;
|
||||
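/* Each BLKXRun describes one run of sectors: type is one of the BLOCK_* codes
   above, sectorStart/sectorCount give the run's position within the owning BLKX
   table, and compOffset/compLength locate the (possibly compressed) data in the
   data fork. */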
|
||||
typedef struct {
|
||||
uint16_t version; /* set to 5 */
|
||||
uint32_t isHFS; /* first dword of v53(ImageInfoRec): Set to 1 if it's a HFS or HFS+ partition -- duh. */
|
||||
uint32_t unknown1; /* second dword of v53: seems to be garbage if it's HFS+, stuff related to HFS embedded if it's that*/
|
||||
uint8_t dataLen; /* length of the data that follows; comes right before the data in ImageInfoRec. Always set to 0 for HFS, HFS+ */
|
||||
uint8_t data[255]; /* other data from v53, dataLen + 1 bytes, the rest NULL filled... a string? Not set for HFS, HFS+ */
|
||||
uint32_t unknown2; /* 8 bytes before volumeModified in v53, seems to be always set to 0 for HFS, HFS+ */
|
||||
uint32_t unknown3; /* 4 bytes before volumeModified in v53, seems to be always set to 0 for HFS, HFS+ */
|
||||
uint32_t volumeModified; /* offset 272 in v53 */
|
||||
uint32_t unknown4; /* always seems to be 0 for UDIF */
|
||||
uint16_t volumeSignature; /* HX in our case */
|
||||
uint16_t sizePresent; /* always set to 1 */
|
||||
} __attribute__((__packed__)) SizeResource;
|
||||
|
||||
typedef struct {
|
||||
uint16_t version; /* set to 1 */
|
||||
uint32_t type; /* set to 0x2 for MKBlockChecksum */
|
||||
uint32_t checksum;
|
||||
} __attribute__((__packed__)) CSumResource;
|
||||
|
||||
typedef struct NSizResource {
|
||||
char isVolume;
|
||||
unsigned char* sha1Digest;
|
||||
uint32_t blockChecksum2;
|
||||
uint32_t bytes;
|
||||
uint32_t modifyDate;
|
||||
uint32_t partitionNumber;
|
||||
uint32_t version;
|
||||
uint32_t volumeSignature;
|
||||
struct NSizResource* next;
|
||||
} NSizResource;
|
||||
|
||||
#define DDM_DESCRIPTOR 0xFFFFFFFF
|
||||
#define ENTIRE_DEVICE_DESCRIPTOR 0xFFFFFFFE
|
||||
|
||||
typedef struct {
|
||||
uint32_t fUDIFBlocksSignature;
|
||||
uint32_t infoVersion;
|
||||
uint64_t firstSectorNumber;
|
||||
uint64_t sectorCount;
|
||||
|
||||
uint64_t dataStart;
|
||||
uint32_t decompressBufferRequested;
|
||||
uint32_t blocksDescriptor;
|
||||
|
||||
uint32_t reserved1;
|
||||
uint32_t reserved2;
|
||||
uint32_t reserved3;
|
||||
uint32_t reserved4;
|
||||
uint32_t reserved5;
|
||||
uint32_t reserved6;
|
||||
|
||||
UDIFChecksum checksum;
|
||||
|
||||
uint32_t blocksRunCount;
|
||||
BLKXRun runs[0];
|
||||
} __attribute__((__packed__)) BLKXTable;
|
||||
|
||||
typedef struct {
|
||||
uint32_t ddBlock;
|
||||
uint16_t ddSize;
|
||||
uint16_t ddType;
|
||||
} __attribute__((__packed__)) DriverDescriptor;
|
||||
|
||||
typedef struct {
|
||||
uint16_t pmSig;
|
||||
uint16_t pmSigPad;
|
||||
uint32_t pmMapBlkCnt;
|
||||
uint32_t pmPyPartStart;
|
||||
uint32_t pmPartBlkCnt;
|
||||
unsigned char pmPartName[32];
|
||||
unsigned char pmParType[32];
|
||||
uint32_t pmLgDataStart;
|
||||
uint32_t pmDataCnt;
|
||||
uint32_t pmPartStatus;
|
||||
uint32_t pmLgBootStart;
|
||||
uint32_t pmBootSize;
|
||||
uint32_t pmBootAddr;
|
||||
uint32_t pmBootAddr2;
|
||||
uint32_t pmBootEntry;
|
||||
uint32_t pmBootEntry2;
|
||||
uint32_t pmBootCksum;
|
||||
unsigned char pmProcessor[16];
|
||||
uint32_t bootCode;
|
||||
uint16_t pmPad[186];
|
||||
} __attribute__((__packed__)) Partition;
|
||||
|
||||
typedef struct {
|
||||
uint16_t sbSig;
|
||||
uint16_t sbBlkSize;
|
||||
uint32_t sbBlkCount;
|
||||
uint16_t sbDevType;
|
||||
uint16_t sbDevId;
|
||||
uint32_t sbData;
|
||||
uint16_t sbDrvrCount;
|
||||
uint32_t ddBlock;
|
||||
uint16_t ddSize;
|
||||
uint16_t ddType;
|
||||
DriverDescriptor ddPad[0];
|
||||
} __attribute__((__packed__)) DriverDescriptorRecord;
|
||||
|
||||
typedef struct ResourceData {
|
||||
uint32_t attributes;
|
||||
unsigned char* data;
|
||||
size_t dataLength;
|
||||
int id;
|
||||
char* name;
|
||||
struct ResourceData* next;
|
||||
} ResourceData;
|
||||
|
||||
typedef void (*FlipDataFunc)(unsigned char* data, char out);
|
||||
typedef void (*ChecksumFunc)(void* ckSum, const unsigned char* data, size_t len);
|
||||
|
||||
typedef struct ResourceKey {
|
||||
unsigned char* key;
|
||||
ResourceData* data;
|
||||
struct ResourceKey* next;
|
||||
FlipDataFunc flipData;
|
||||
} ResourceKey;
|
||||
|
||||
#define SHA1_DIGEST_SIZE 20
|
||||
|
||||
typedef struct {
|
||||
uint32_t state[5];
|
||||
uint32_t count[2];
|
||||
uint8_t buffer[64];
|
||||
} SHA1_CTX;
|
||||
|
||||
typedef struct {
|
||||
uint32_t block;
|
||||
uint32_t crc;
|
||||
SHA1_CTX sha1;
|
||||
} ChecksumToken;
|
||||
|
||||
static inline uint32_t readUInt32(AbstractFile* file) {
|
||||
uint32_t data;
|
||||
|
||||
ASSERT(file->read(file, &data, sizeof(data)) == sizeof(data), "fread");
|
||||
FLIPENDIAN(data);
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
static inline void writeUInt32(AbstractFile* file, uint32_t data) {
|
||||
FLIPENDIAN(data);
|
||||
ASSERT(file->write(file, &data, sizeof(data)) == sizeof(data), "fwrite");
|
||||
}
|
||||
|
||||
static inline uint64_t readUInt64(AbstractFile* file) {
|
||||
uint64_t data;
|
||||
|
||||
ASSERT(file->read(file, &data, sizeof(data)) == sizeof(data), "fread");
|
||||
FLIPENDIAN(data);
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
static inline void writeUInt64(AbstractFile* file, uint64_t data) {
|
||||
FLIPENDIAN(data);
|
||||
ASSERT(file->write(file, &data, sizeof(data)) == sizeof(data), "fwrite");
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
void outResources(AbstractFile* file, AbstractFile* out);
|
||||
|
||||
uint32_t CRC32Checksum(uint32_t* crc, const unsigned char *buf, size_t len);
|
||||
uint32_t MKBlockChecksum(uint32_t* ckSum, const unsigned char* data, size_t len);
|
||||
|
||||
void BlockSHA1CRC(void* token, const unsigned char* data, size_t len);
|
||||
void BlockCRC(void* token, const unsigned char* data, size_t len);
|
||||
void CRCProxy(void* token, const unsigned char* data, size_t len);
|
||||
|
||||
void SHA1Init(SHA1_CTX* context);
|
||||
void SHA1Update(SHA1_CTX* context, const uint8_t* data, const size_t len);
|
||||
void SHA1Final(uint8_t digest[SHA1_DIGEST_SIZE], SHA1_CTX* context);
|
||||
|
||||
void flipUDIFChecksum(UDIFChecksum* o, char out);
|
||||
void readUDIFChecksum(AbstractFile* file, UDIFChecksum* o);
|
||||
void writeUDIFChecksum(AbstractFile* file, UDIFChecksum* o);
|
||||
void readUDIFID(AbstractFile* file, UDIFID* o);
|
||||
void writeUDIFID(AbstractFile* file, UDIFID* o);
|
||||
void readUDIFResourceFile(AbstractFile* file, UDIFResourceFile* o);
|
||||
void writeUDIFResourceFile(AbstractFile* file, UDIFResourceFile* o);
|
||||
|
||||
ResourceKey* readResources(AbstractFile* file, UDIFResourceFile* resourceFile);
|
||||
void writeResources(AbstractFile* file, ResourceKey* resources);
|
||||
void releaseResources(ResourceKey* resources);
|
||||
|
||||
NSizResource* readNSiz(ResourceKey* resources);
|
||||
ResourceKey* writeNSiz(NSizResource* nSiz);
|
||||
void releaseNSiz(NSizResource* nSiz);
|
||||
|
||||
extern const char* plistHeader;
|
||||
extern const char* plistFooter;
|
||||
|
||||
ResourceKey* getResourceByKey(ResourceKey* resources, const char* key);
|
||||
ResourceData* getDataByID(ResourceKey* resource, int id);
|
||||
ResourceKey* insertData(ResourceKey* resources, const char* key, int id, const char* name, const char* data, size_t dataLength, uint32_t attributes);
|
||||
ResourceKey* makePlst();
|
||||
ResourceKey* makeSize(HFSPlusVolumeHeader* volumeHeader);
|
||||
|
||||
void flipDriverDescriptorRecord(DriverDescriptorRecord* record, char out);
|
||||
void flipPartition(Partition* partition, char out, unsigned int BlockSize);
|
||||
void flipPartitionMultiple(Partition* partition, char multiple, char out, unsigned int BlockSize);
|
||||
|
||||
void readDriverDescriptorMap(AbstractFile* file, ResourceKey* resources);
|
||||
DriverDescriptorRecord* createDriverDescriptorMap(uint32_t numSectors, unsigned int BlockSize);
|
||||
int writeDriverDescriptorMap(int pNum, AbstractFile* file, DriverDescriptorRecord* DDM, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources);
|
||||
void readApplePartitionMap(AbstractFile* file, ResourceKey* resources, unsigned int BlockSize);
|
||||
Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType, unsigned int BlockSize);
|
||||
int writeApplePartitionMap(int pNum, AbstractFile* file, Partition* partitions, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn);
|
||||
int writeATAPI(int pNum, AbstractFile* file, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn);
|
||||
int writeFreePartition(int pNum, AbstractFile* outFile, uint32_t offset, uint32_t numSectors, ResourceKey** resources);
|
||||
|
||||
void extractBLKX(AbstractFile* in, AbstractFile* out, BLKXTable* blkx);
|
||||
BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorNumber, uint32_t numSectors, uint32_t blocksDescriptor,
|
||||
uint32_t checksumType, ChecksumFunc uncompressedChk, void* uncompressedChkToken, ChecksumFunc compressedChk,
|
||||
void* compressedChkToken, Volume* volume, int addComment);
|
||||
|
||||
|
||||
int extractDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, int partNum);
|
||||
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int BlockSize);
|
||||
int convertToISO(AbstractFile* abstractIn, AbstractFile* abstractOut);
|
||||
int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -0,0 +1,21 @@
|
||||
/*
|
||||
* dmgfile.h
|
||||
* libdmg-hfsplus
|
||||
*
|
||||
*/
|
||||
|
||||
#include <dmg/dmg.h>
|
||||
|
||||
io_func* openDmgFile(AbstractFile* dmg);
|
||||
io_func* openDmgFilePartition(AbstractFile* dmg, int partition);
|
||||
|
||||
typedef struct DMG {
|
||||
AbstractFile* dmg;
|
||||
ResourceKey* resources;
|
||||
uint32_t numBLKX;
|
||||
BLKXTable** blkx;
|
||||
void* runData;
|
||||
uint64_t runStart;
|
||||
uint64_t runEnd;
|
||||
uint64_t offset;
|
||||
} DMG;
|
@ -0,0 +1,19 @@
|
||||
#ifndef DMGLIB_H
|
||||
#define DMGLIB_H
|
||||
|
||||
#include <dmg/dmg.h>
|
||||
#include "abstractfile.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
int extractDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, int partNum);
|
||||
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int BlockSize);
|
||||
|
||||
int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut);
|
||||
int convertToISO(AbstractFile* abstractIn, AbstractFile* abstractOut);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -0,0 +1,98 @@
|
||||
#ifndef FILEVAULT_H
|
||||
#define FILEVAULT_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include "dmg.h"
|
||||
|
||||
#ifdef HAVE_CRYPT
|
||||
|
||||
#include <openssl/hmac.h>
|
||||
#include <openssl/aes.h>
|
||||
|
||||
#define FILEVAULT_CIPHER_KEY_LENGTH 16
|
||||
#define FILEVAULT_CIPHER_BLOCKSIZE 16
|
||||
#define FILEVAULT_CHUNK_SIZE 4096
|
||||
#define FILEVAULT_PBKDF2_ITER_COUNT 1000
|
||||
#define FILEVAULT_MSGDGST_LENGTH 20
|
||||
|
||||
/*
|
||||
* Information about the FileVault format was yoinked from vfdecrypt, which was written by Ralf-Philipp Weinmann <ralf@coderpunks.org>,
|
||||
* Jacob Appelbaum <jacob@appelbaum.net>, and Christian Fromme <kaner@strace.org>
|
||||
*/
|
||||
|
||||
#define FILEVAULT_V2_SIGNATURE 0x656e637263647361ULL
|
||||
|
||||
typedef struct FileVaultV1Header {
|
||||
uint8_t padding1[48];
|
||||
uint32_t kdfIterationCount;
|
||||
uint32_t kdfSaltLen;
|
||||
uint8_t kdfSalt[48];
|
||||
uint8_t unwrapIV[0x20];
|
||||
uint32_t wrappedAESKeyLen;
|
||||
uint8_t wrappedAESKey[296];
|
||||
uint32_t wrappedHMACSHA1KeyLen;
|
||||
uint8_t wrappedHMACSHA1Key[300];
|
||||
uint32_t integrityKeyLen;
|
||||
uint8_t integrityKey[48];
|
||||
uint8_t padding2[484];
|
||||
} __attribute__((__packed__)) FileVaultV1Header;
|
||||
|
||||
typedef struct FileVaultV2Header {
|
||||
uint64_t signature;
|
||||
uint32_t version;
|
||||
uint32_t encIVSize;
|
||||
uint32_t unk1;
|
||||
uint32_t unk2;
|
||||
uint32_t unk3;
|
||||
uint32_t unk4;
|
||||
uint32_t unk5;
|
||||
UDIFID uuid;
|
||||
uint32_t blockSize;
|
||||
uint64_t dataSize;
|
||||
uint64_t dataOffset;
|
||||
uint8_t padding[0x260];
|
||||
uint32_t kdfAlgorithm;
|
||||
uint32_t kdfPRNGAlgorithm;
|
||||
uint32_t kdfIterationCount;
|
||||
uint32_t kdfSaltLen;
|
||||
uint8_t kdfSalt[0x20];
|
||||
uint32_t blobEncIVSize;
|
||||
uint8_t blobEncIV[0x20];
|
||||
uint32_t blobEncKeyBits;
|
||||
uint32_t blobEncAlgorithm;
|
||||
uint32_t blobEncPadding;
|
||||
uint32_t blobEncMode;
|
||||
uint32_t encryptedKeyblobSize;
|
||||
uint8_t encryptedKeyblob[0x30];
|
||||
} __attribute__((__packed__)) FileVaultV2Header;
|
||||
|
||||
typedef struct FileVaultInfo {
|
||||
union {
|
||||
FileVaultV1Header v1;
|
||||
FileVaultV2Header v2;
|
||||
} header;
|
||||
|
||||
uint8_t version;
|
||||
uint64_t dataOffset;
|
||||
uint64_t dataSize;
|
||||
uint32_t blockSize;
|
||||
|
||||
AbstractFile* file;
|
||||
|
||||
HMAC_CTX hmacCTX;
|
||||
AES_KEY aesKey;
|
||||
AES_KEY aesEncKey;
|
||||
|
||||
off_t offset;
|
||||
|
||||
uint32_t curChunk;
|
||||
unsigned char chunk[FILEVAULT_CHUNK_SIZE];
|
||||
|
||||
char dirty;
|
||||
char headerDirty;
|
||||
} FileVaultInfo;
|
||||
#endif
|
||||
|
||||
AbstractFile* createAbstractFileFromFileVault(AbstractFile* file, const char* key);
|
||||
|
||||
#endif
|
@ -0,0 +1,71 @@
|
||||
#ifndef HFSCOMPRESS_H
|
||||
#define HFSCOMPRESS_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include "common.h"
|
||||
|
||||
#define CMPFS_MAGIC 0x636D7066
|
||||
|
||||
typedef struct HFSPlusDecmpfs {
|
||||
uint32_t magic;
|
||||
uint32_t flags;
|
||||
uint64_t size;
|
||||
uint8_t data[0];
|
||||
} __attribute__ ((packed)) HFSPlusDecmpfs;
|
||||
|
||||
typedef struct HFSPlusCmpfRsrcHead {
|
||||
uint32_t headerSize;
|
||||
uint32_t totalSize;
|
||||
uint32_t dataSize;
|
||||
uint32_t flags;
|
||||
} __attribute__ ((packed)) HFSPlusCmpfRsrcHead;
|
||||
|
||||
typedef struct HFSPlusCmpfRsrcBlock {
|
||||
uint32_t offset;
|
||||
uint32_t size;
|
||||
} __attribute__ ((packed)) HFSPlusCmpfRsrcBlock;
|
||||
|
||||
typedef struct HFSPlusCmpfRsrcBlockHead {
|
||||
uint32_t dataSize;
|
||||
uint32_t numBlocks;
|
||||
HFSPlusCmpfRsrcBlock blocks[0];
|
||||
} __attribute__ ((packed)) HFSPlusCmpfRsrcBlockHead;
|
||||
|
||||
typedef struct HFSPlusCmpfEnd {
|
||||
uint32_t pad[6];
|
||||
uint16_t unk1;
|
||||
uint16_t unk2;
|
||||
uint16_t unk3;
|
||||
uint32_t magic;
|
||||
uint32_t flags;
|
||||
uint64_t size;
|
||||
uint32_t unk4;
|
||||
} __attribute__ ((packed)) HFSPlusCmpfEnd;
|
||||
|
||||
typedef struct HFSPlusCompressed {
|
||||
Volume* volume;
|
||||
HFSPlusCatalogFile* file;
|
||||
io_func* io;
|
||||
size_t decmpfsSize;
|
||||
HFSPlusDecmpfs* decmpfs;
|
||||
|
||||
HFSPlusCmpfRsrcHead rsrcHead;
|
||||
HFSPlusCmpfRsrcBlockHead* blocks;
|
||||
|
||||
int dirty;
|
||||
|
||||
uint8_t* cached;
|
||||
uint32_t cachedStart;
|
||||
uint32_t cachedEnd;
|
||||
} HFSPlusCompressed;
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
void flipHFSPlusDecmpfs(HFSPlusDecmpfs* compressData);
|
||||
io_func* openHFSPlusCompressed(Volume* volume, HFSPlusCatalogFile* file);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -0,0 +1,25 @@
|
||||
#include "common.h"
|
||||
#include "hfsplus.h"
|
||||
#include "abstractfile.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume);
|
||||
void writeToHFSFile(HFSPlusCatalogFile* file, AbstractFile* input, Volume* volume);
|
||||
void get_hfs(Volume* volume, const char* inFileName, AbstractFile* output);
|
||||
int add_hfs(Volume* volume, AbstractFile* inFile, const char* outFileName);
|
||||
void grow_hfs(Volume* volume, uint64_t newSize);
|
||||
void removeAllInFolder(HFSCatalogNodeID folderID, Volume* volume, const char* parentName);
|
||||
void addAllInFolder(HFSCatalogNodeID folderID, Volume* volume, const char* parentName);
|
||||
void addall_hfs(Volume* volume, const char* dirToMerge, const char* dest);
|
||||
void extractAllInFolder(HFSCatalogNodeID folderID, Volume* volume);
|
||||
int copyAcrossVolumes(Volume* volume1, Volume* volume2, char* path1, char* path2);
|
||||
|
||||
void hfs_untar(Volume* volume, AbstractFile* tarFile);
|
||||
void hfs_ls(Volume* volume, const char* path);
|
||||
void hfs_setsilence(int s);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -0,0 +1,586 @@
|
||||
#ifndef HFSPLUS_H
|
||||
#define HFSPLUS_H
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#define READ(a, b, c, d) ((*((a)->read))(a, b, c, d))
|
||||
#define WRITE(a, b, c, d) ((*((a)->write))(a, b, c, d))
|
||||
#define CLOSE(a) ((*((a)->close))(a))
|
||||
#define COMPARE(a, b, c) ((*((a)->compare))(b, c))
|
||||
#define READ_KEY(a, b, c) ((*((a)->keyRead))(b, c))
|
||||
#define WRITE_KEY(a, b, c, d) ((*((a)->keyWrite))(b, c, d))
|
||||
#define READ_DATA(a, b, c) ((*((a)->dataRead))(b, c))
|
||||
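/* These macros dispatch through the function pointers stored in io_func and the
   B-tree structures, e.g. READ(volume->image, offset, size, buf) calls whichever
   read callback was installed (rawFileRead(), an AbstractFile wrapper, etc.). */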
|
||||
struct BTKey {
|
||||
uint16_t keyLength;
|
||||
unsigned char data[0];
|
||||
} __attribute__((__packed__));
|
||||
|
||||
typedef struct BTKey BTKey;
|
||||
|
||||
typedef BTKey* (*dataReadFunc)(off_t offset, struct io_func_struct* io);
|
||||
typedef void (*keyPrintFunc)(BTKey* toPrint);
|
||||
typedef int (*keyWriteFunc)(off_t offset, BTKey* toWrite, struct io_func_struct* io);
|
||||
typedef int (*compareFunc)(BTKey* left, BTKey* right);
|
||||
|
||||
#define STR_SIZE(str) (sizeof(uint16_t) + (sizeof(uint16_t) * (str).length))
|
||||
|
||||
#ifndef __HFS_FORMAT__
|
||||
|
||||
typedef uint32_t HFSCatalogNodeID;
|
||||
|
||||
enum {
|
||||
kHFSRootParentID = 1,
|
||||
kHFSRootFolderID = 2,
|
||||
kHFSExtentsFileID = 3,
|
||||
kHFSCatalogFileID = 4,
|
||||
kHFSBadBlockFileID = 5,
|
||||
kHFSAllocationFileID = 6,
|
||||
kHFSStartupFileID = 7,
|
||||
kHFSAttributesFileID = 8,
|
||||
kHFSRepairCatalogFileID = 14,
|
||||
kHFSBogusExtentFileID = 15,
|
||||
kHFSFirstUserCatalogNodeID = 16
|
||||
};
|
||||
|
||||
struct HFSUniStr255 {
|
||||
uint16_t length;
|
||||
uint16_t unicode[255];
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSUniStr255 HFSUniStr255;
|
||||
typedef const HFSUniStr255 *ConstHFSUniStr255Param;
|
||||
|
||||
struct HFSPlusExtentDescriptor {
|
||||
uint32_t startBlock;
|
||||
uint32_t blockCount;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusExtentDescriptor HFSPlusExtentDescriptor;
|
||||
|
||||
typedef HFSPlusExtentDescriptor HFSPlusExtentRecord[8];
|
||||
|
||||
struct HFSPlusForkData {
|
||||
uint64_t logicalSize;
|
||||
uint32_t clumpSize;
|
||||
uint32_t totalBlocks;
|
||||
HFSPlusExtentRecord extents;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusForkData HFSPlusForkData;
|
||||
|
||||
struct HFSPlusVolumeHeader {
|
||||
uint16_t signature;
|
||||
uint16_t version;
|
||||
uint32_t attributes;
|
||||
uint32_t lastMountedVersion;
|
||||
uint32_t journalInfoBlock;
|
||||
|
||||
uint32_t createDate;
|
||||
uint32_t modifyDate;
|
||||
uint32_t backupDate;
|
||||
uint32_t checkedDate;
|
||||
|
||||
uint32_t fileCount;
|
||||
uint32_t folderCount;
|
||||
|
||||
uint32_t blockSize;
|
||||
uint32_t totalBlocks;
|
||||
uint32_t freeBlocks;
|
||||
|
||||
uint32_t nextAllocation;
|
||||
uint32_t rsrcClumpSize;
|
||||
uint32_t dataClumpSize;
|
||||
HFSCatalogNodeID nextCatalogID;
|
||||
|
||||
uint32_t writeCount;
|
||||
uint64_t encodingsBitmap;
|
||||
|
||||
uint32_t finderInfo[8];
|
||||
|
||||
HFSPlusForkData allocationFile;
|
||||
HFSPlusForkData extentsFile;
|
||||
HFSPlusForkData catalogFile;
|
||||
HFSPlusForkData attributesFile;
|
||||
HFSPlusForkData startupFile;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusVolumeHeader HFSPlusVolumeHeader;
|
||||
|
||||
enum {
|
||||
kBTLeafNode = -1,
|
||||
kBTIndexNode = 0,
|
||||
kBTHeaderNode = 1,
|
||||
kBTMapNode = 2
|
||||
};
|
||||
|
||||
struct BTNodeDescriptor {
|
||||
uint32_t fLink;
|
||||
uint32_t bLink;
|
||||
int8_t kind;
|
||||
uint8_t height;
|
||||
uint16_t numRecords;
|
||||
uint16_t reserved;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct BTNodeDescriptor BTNodeDescriptor;
|
||||
|
||||
#define kHFSCaseFolding 0xCF
|
||||
#define kHFSBinaryCompare 0xBC
|
||||
|
||||
struct BTHeaderRec {
|
||||
uint16_t treeDepth;
|
||||
uint32_t rootNode;
|
||||
uint32_t leafRecords;
|
||||
uint32_t firstLeafNode;
|
||||
uint32_t lastLeafNode;
|
||||
uint16_t nodeSize;
|
||||
uint16_t maxKeyLength;
|
||||
uint32_t totalNodes;
|
||||
uint32_t freeNodes;
|
||||
uint16_t reserved1;
|
||||
uint32_t clumpSize; // misaligned
|
||||
uint8_t btreeType;
|
||||
uint8_t keyCompareType;
|
||||
uint32_t attributes; // long aligned again
|
||||
uint32_t reserved3[16];
|
||||
} __attribute__((__packed__));
|
||||
typedef struct BTHeaderRec BTHeaderRec;
|
||||
|
||||
struct HFSPlusExtentKey {
|
||||
uint16_t keyLength;
|
||||
uint8_t forkType;
|
||||
uint8_t pad;
|
||||
HFSCatalogNodeID fileID;
|
||||
uint32_t startBlock;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusExtentKey HFSPlusExtentKey;
|
||||
|
||||
struct HFSPlusCatalogKey {
|
||||
uint16_t keyLength;
|
||||
HFSCatalogNodeID parentID;
|
||||
HFSUniStr255 nodeName;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusCatalogKey HFSPlusCatalogKey;
|
||||
|
||||
#ifndef __MACTYPES__
|
||||
struct Point {
|
||||
int16_t v;
|
||||
int16_t h;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct Point Point;
|
||||
|
||||
struct Rect {
|
||||
int16_t top;
|
||||
int16_t left;
|
||||
int16_t bottom;
|
||||
int16_t right;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct Rect Rect;
|
||||
|
||||
/* OSType is a 32-bit value made by packing four 1-byte characters
|
||||
together. */
|
||||
typedef uint32_t FourCharCode;
|
||||
typedef FourCharCode OSType;
|
||||
|
||||
#endif
|
||||
|
||||
/* Finder flags (finderFlags, fdFlags and frFlags) */
|
||||
enum {
|
||||
kIsOnDesk = 0x0001, /* Files and folders (System 6) */
|
||||
kColor = 0x000E, /* Files and folders */
|
||||
kIsShared = 0x0040, /* Files only (Applications only) If */
|
||||
/* clear, the application needs */
|
||||
/* to write to its resource fork, */
|
||||
/* and therefore cannot be shared */
|
||||
/* on a server */
|
||||
kHasNoINITs = 0x0080, /* Files only (Extensions/Control */
|
||||
/* Panels only) */
|
||||
/* This file contains no INIT resource */
|
||||
kHasBeenInited = 0x0100, /* Files only. Clear if the file */
|
||||
/* contains desktop database resources */
|
||||
/* ('BNDL', 'FREF', 'open', 'kind'...) */
|
||||
/* that have not been added yet. Set */
|
||||
/* only by the Finder. */
|
||||
/* Reserved for folders */
|
||||
kHasCustomIcon = 0x0400, /* Files and folders */
|
||||
kIsStationery = 0x0800, /* Files only */
|
||||
kNameLocked = 0x1000, /* Files and folders */
|
||||
kHasBundle = 0x2000, /* Files only */
|
||||
kIsInvisible = 0x4000, /* Files and folders */
|
||||
kIsAlias = 0x8000 /* Files only */
|
||||
};
|
||||
|
||||
/* Extended flags (extendedFinderFlags, fdXFlags and frXFlags) */
|
||||
enum {
|
||||
kExtendedFlagsAreInvalid = 0x8000, /* The other extended flags */
|
||||
/* should be ignored */
|
||||
kExtendedFlagHasCustomBadge = 0x0100, /* The file or folder has a */
|
||||
/* badge resource */
|
||||
kExtendedFlagHasRoutingInfo = 0x0004 /* The file contains routing */
|
||||
/* info resource */
|
||||
};
|
||||
|
||||
enum {
|
||||
kSymLinkFileType = 0x736C6E6B, /* 'slnk' */
|
||||
kSymLinkCreator = 0x72686170 /* 'rhap' */
|
||||
};
|
||||
|
||||
struct FileInfo {
|
||||
OSType fileType; /* The type of the file */
|
||||
OSType fileCreator; /* The file's creator */
|
||||
uint16_t finderFlags;
|
||||
Point location; /* File's location in the folder. */
|
||||
uint16_t reservedField;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct FileInfo FileInfo;
|
||||
|
||||
struct ExtendedFileInfo {
|
||||
int16_t reserved1[4];
|
||||
uint16_t extendedFinderFlags;
|
||||
int16_t reserved2;
|
||||
int32_t putAwayFolderID;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct ExtendedFileInfo ExtendedFileInfo;
|
||||
|
||||
struct FolderInfo {
|
||||
Rect windowBounds; /* The position and dimension of the */
|
||||
/* folder's window */
|
||||
uint16_t finderFlags;
|
||||
Point location; /* Folder's location in the parent */
|
||||
/* folder. If set to {0, 0}, the Finder */
|
||||
/* will place the item automatically */
|
||||
uint16_t reservedField;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct FolderInfo FolderInfo;
|
||||
|
||||
struct ExtendedFolderInfo {
|
||||
Point scrollPosition; /* Scroll position (for icon views) */
|
||||
int32_t reserved1;
|
||||
uint16_t extendedFinderFlags;
|
||||
int16_t reserved2;
|
||||
int32_t putAwayFolderID;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct ExtendedFolderInfo ExtendedFolderInfo;
|
||||
|
||||
#ifndef _STAT_H_
|
||||
#ifndef _SYS_STAT_H
|
||||
#define S_ISUID 0004000 /* set user id on execution */
|
||||
#define S_ISGID 0002000 /* set group id on execution */
|
||||
#define S_ISTXT 0001000 /* sticky bit */
|
||||
|
||||
#define S_IRWXU 0000700 /* RWX mask for owner */
|
||||
#define S_IRUSR 0000400 /* R for owner */
|
||||
#define S_IWUSR 0000200 /* W for owner */
|
||||
#define S_IXUSR 0000100 /* X for owner */
|
||||
|
||||
#define S_IRWXG 0000070 /* RWX mask for group */
|
||||
#define S_IRGRP 0000040 /* R for group */
|
||||
#define S_IWGRP 0000020 /* W for group */
|
||||
#define S_IXGRP 0000010 /* X for group */
|
||||
|
||||
#define S_IRWXO 0000007 /* RWX mask for other */
|
||||
#define S_IROTH 0000004 /* R for other */
|
||||
#define S_IWOTH 0000002 /* W for other */
|
||||
#define S_IXOTH 0000001 /* X for other */
|
||||
|
||||
#define S_IFMT 0170000 /* type of file mask */
|
||||
#define S_IFIFO 0010000 /* named pipe (fifo) */
|
||||
#define S_IFCHR 0020000 /* character special */
|
||||
#define S_IFDIR 0040000 /* directory */
|
||||
#define S_IFBLK 0060000 /* block special */
|
||||
#define S_IFREG 0100000 /* regular */
|
||||
#define S_IFLNK 0120000 /* symbolic link */
|
||||
#define S_IFSOCK 0140000 /* socket */
|
||||
#define S_IFWHT 0160000 /* whiteout */
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define UF_COMPRESSED 040
|
||||
|
||||
struct HFSPlusBSDInfo {
|
||||
uint32_t ownerID;
|
||||
uint32_t groupID;
|
||||
uint8_t adminFlags;
|
||||
uint8_t ownerFlags;
|
||||
uint16_t fileMode;
|
||||
union {
|
||||
uint32_t iNodeNum;
|
||||
uint32_t linkCount;
|
||||
uint32_t rawDevice;
|
||||
} special;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusBSDInfo HFSPlusBSDInfo;
|
||||
|
||||
enum {
|
||||
kHFSPlusFolderRecord = 0x0001,
|
||||
kHFSPlusFileRecord = 0x0002,
|
||||
kHFSPlusFolderThreadRecord = 0x0003,
|
||||
kHFSPlusFileThreadRecord = 0x0004
|
||||
};
|
||||
|
||||
enum {
|
||||
kHFSFileLockedBit = 0x0000, /* file is locked and cannot be written to */
|
||||
kHFSFileLockedMask = 0x0001,
|
||||
|
||||
kHFSThreadExistsBit = 0x0001, /* a file thread record exists for this file */
|
||||
kHFSThreadExistsMask = 0x0002,
|
||||
|
||||
kHFSHasAttributesBit = 0x0002, /* object has extended attributes */
|
||||
kHFSHasAttributesMask = 0x0004,
|
||||
|
||||
kHFSHasSecurityBit = 0x0003, /* object has security data (ACLs) */
|
||||
kHFSHasSecurityMask = 0x0008,
|
||||
|
||||
kHFSHasFolderCountBit = 0x0004, /* only for HFSX, folder maintains a separate sub-folder count */
|
||||
kHFSHasFolderCountMask = 0x0010, /* (sum of folder records and directory hard links) */
|
||||
|
||||
kHFSHasLinkChainBit = 0x0005, /* has hardlink chain (inode or link) */
|
||||
kHFSHasLinkChainMask = 0x0020,
|
||||
|
||||
kHFSHasChildLinkBit = 0x0006, /* folder has a child that's a dir link */
|
||||
kHFSHasChildLinkMask = 0x0040
|
||||
};
|
||||
|
||||
struct HFSPlusCatalogFolder {
|
||||
int16_t recordType;
|
||||
uint16_t flags;
|
||||
uint32_t valence;
|
||||
HFSCatalogNodeID folderID;
|
||||
uint32_t createDate;
|
||||
uint32_t contentModDate;
|
||||
uint32_t attributeModDate;
|
||||
uint32_t accessDate;
|
||||
uint32_t backupDate;
|
||||
HFSPlusBSDInfo permissions;
|
||||
FolderInfo userInfo;
|
||||
ExtendedFolderInfo finderInfo;
|
||||
uint32_t textEncoding;
|
||||
uint32_t folderCount;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusCatalogFolder HFSPlusCatalogFolder;
|
||||
|
||||
struct HFSPlusCatalogFile {
|
||||
int16_t recordType;
|
||||
uint16_t flags;
|
||||
uint32_t reserved1;
|
||||
HFSCatalogNodeID fileID;
|
||||
uint32_t createDate;
|
||||
uint32_t contentModDate;
|
||||
uint32_t attributeModDate;
|
||||
uint32_t accessDate;
|
||||
uint32_t backupDate;
|
||||
HFSPlusBSDInfo permissions;
|
||||
FileInfo userInfo;
|
||||
ExtendedFileInfo finderInfo;
|
||||
uint32_t textEncoding;
|
||||
uint32_t reserved2;
|
||||
|
||||
HFSPlusForkData dataFork;
|
||||
HFSPlusForkData resourceFork;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusCatalogFile HFSPlusCatalogFile;
|
||||
|
||||
struct HFSPlusCatalogThread {
|
||||
int16_t recordType;
|
||||
int16_t reserved;
|
||||
HFSCatalogNodeID parentID;
|
||||
HFSUniStr255 nodeName;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusCatalogThread HFSPlusCatalogThread;
|
||||
|
||||
enum {
|
||||
kHFSPlusAttrInlineData = 0x10,
|
||||
kHFSPlusAttrForkData = 0x20,
|
||||
kHFSPlusAttrExtents = 0x30
|
||||
};
|
||||
|
||||
struct HFSPlusAttrForkData {
|
||||
uint32_t recordType;
|
||||
uint32_t reserved;
|
||||
HFSPlusForkData theFork;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusAttrForkData HFSPlusAttrForkData;
|
||||
|
||||
struct HFSPlusAttrExtents {
|
||||
uint32_t recordType;
|
||||
uint32_t reserved;
|
||||
HFSPlusExtentRecord extents;
|
||||
};
|
||||
typedef struct HFSPlusAttrExtents HFSPlusAttrExtents;
|
||||
|
||||
struct HFSPlusAttrData {
|
||||
uint32_t recordType;
|
||||
uint32_t reserved[2];
|
||||
uint32_t size;
|
||||
uint8_t data[0];
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusAttrData HFSPlusAttrData;
|
||||
|
||||
union HFSPlusAttrRecord {
|
||||
uint32_t recordType;
|
||||
HFSPlusAttrData attrData;
|
||||
HFSPlusAttrForkData forkData;
|
||||
HFSPlusAttrExtents overflowExtents;
|
||||
};
|
||||
typedef union HFSPlusAttrRecord HFSPlusAttrRecord;
|
||||
|
||||
struct HFSPlusAttrKey {
|
||||
uint16_t keyLength;
|
||||
uint16_t pad;
|
||||
uint32_t fileID;
|
||||
uint32_t startBlock;
|
||||
HFSUniStr255 name;
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusAttrKey HFSPlusAttrKey;
|
||||
|
||||
enum {
|
||||
kHardLinkFileType = 0x686C6E6B, /* 'hlnk' */
|
||||
kHFSPlusCreator = 0x6866732B /* 'hfs+' */
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
struct HFSPlusCatalogRecord {
|
||||
int16_t recordType;
|
||||
unsigned char data[0];
|
||||
} __attribute__((__packed__));
|
||||
typedef struct HFSPlusCatalogRecord HFSPlusCatalogRecord;
|
||||
|
||||
struct CatalogRecordList {
|
||||
HFSUniStr255 name;
|
||||
HFSPlusCatalogRecord* record;
|
||||
struct CatalogRecordList* next;
|
||||
};
|
||||
typedef struct CatalogRecordList CatalogRecordList;
|
||||
|
||||
struct XAttrList {
|
||||
char* name;
|
||||
struct XAttrList* next;
|
||||
};
|
||||
typedef struct XAttrList XAttrList;
|
||||
|
||||
struct Extent {
|
||||
uint32_t startBlock;
|
||||
uint32_t blockCount;
|
||||
struct Extent* next;
|
||||
};
|
||||
|
||||
typedef struct Extent Extent;
|
||||
|
||||
typedef struct {
|
||||
io_func* io;
|
||||
BTHeaderRec *headerRec;
|
||||
compareFunc compare;
|
||||
dataReadFunc keyRead;
|
||||
keyWriteFunc keyWrite;
|
||||
keyPrintFunc keyPrint;
|
||||
dataReadFunc dataRead;
|
||||
} BTree;
|
||||
|
||||
typedef struct {
|
||||
io_func* image;
|
||||
HFSPlusVolumeHeader* volumeHeader;
|
||||
|
||||
BTree* extentsTree;
|
||||
BTree* catalogTree;
|
||||
BTree* attrTree;
|
||||
io_func* allocationFile;
|
||||
HFSCatalogNodeID metadataDir;
|
||||
} Volume;
|
||||
|
||||
|
||||
typedef struct {
|
||||
HFSCatalogNodeID id;
|
||||
HFSPlusCatalogRecord* catalogRecord;
|
||||
Volume* volume;
|
||||
HFSPlusForkData* forkData;
|
||||
Extent* extents;
|
||||
} RawFile;
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
void hfs_panic(const char* panicString);
|
||||
|
||||
void printUnicode(HFSUniStr255* str);
|
||||
char* unicodeToAscii(HFSUniStr255* str);
|
||||
|
||||
BTNodeDescriptor* readBTNodeDescriptor(uint32_t num, BTree* tree);
|
||||
|
||||
BTHeaderRec* readBTHeaderRec(io_func* io);
|
||||
|
||||
BTree* openBTree(io_func* io, compareFunc compare, dataReadFunc keyRead, keyWriteFunc keyWrite, keyPrintFunc keyPrint, dataReadFunc dataRead);
|
||||
|
||||
void closeBTree(BTree* tree);
|
||||
|
||||
off_t getRecordOffset(int num, uint32_t nodeNum, BTree* tree);
|
||||
|
||||
off_t getNodeNumberFromPointerRecord(off_t offset, io_func* io);
|
||||
|
||||
void* search(BTree* tree, BTKey* searchKey, int *exact, uint32_t *nodeNumber, int *recordNumber);
|
||||
|
||||
io_func* openFlatFile(const char* fileName);
|
||||
io_func* openFlatFileRO(const char* fileName);
|
||||
|
||||
io_func* openRawFile(HFSCatalogNodeID id, HFSPlusForkData* forkData, HFSPlusCatalogRecord* catalogRecord, Volume* volume);
|
||||
|
||||
BTree* openAttributesTree(io_func* file);
|
||||
size_t getAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t** data);
|
||||
int setAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t* data, size_t size);
|
||||
int unsetAttribute(Volume* volume, uint32_t fileID, const char* name);
|
||||
XAttrList* getAllExtendedAttributes(HFSCatalogNodeID CNID, Volume* volume);
|
||||
|
||||
void flipExtentRecord(HFSPlusExtentRecord* extentRecord);
|
||||
|
||||
BTree* openExtentsTree(io_func* file);
|
||||
|
||||
void ASCIIToUnicode(const char* ascii, HFSUniStr255* unistr);
|
||||
|
||||
void flipCatalogFolder(HFSPlusCatalogFolder* record);
|
||||
void flipCatalogFile(HFSPlusCatalogFile* record);
|
||||
void flipCatalogThread(HFSPlusCatalogThread* record, int out);
|
||||
|
||||
BTree* openCatalogTree(io_func* file);
|
||||
int updateCatalog(Volume* volume, HFSPlusCatalogRecord* catalogRecord);
|
||||
int move(const char* source, const char* dest, Volume* volume);
|
||||
int removeFile(const char* fileName, Volume* volume);
|
||||
HFSCatalogNodeID newFolder(const char* pathName, Volume* volume);
|
||||
HFSCatalogNodeID newFile(const char* pathName, Volume* volume);
|
||||
int chmodFile(const char* pathName, int mode, Volume* volume);
|
||||
int chownFile(const char* pathName, uint32_t owner, uint32_t group, Volume* volume);
|
||||
int makeSymlink(const char* pathName, const char* target, Volume* volume);
|
||||
|
||||
HFSCatalogNodeID getMetadataDirectoryID(Volume* volume);
|
||||
HFSPlusCatalogRecord* getRecordByCNID(HFSCatalogNodeID CNID, Volume* volume);
|
||||
HFSPlusCatalogRecord* getLinkTarget(HFSPlusCatalogRecord* record, HFSCatalogNodeID parentID, HFSPlusCatalogKey *key, Volume* volume);
|
||||
CatalogRecordList* getFolderContents(HFSCatalogNodeID CNID, Volume* volume);
|
||||
HFSPlusCatalogRecord* getRecordFromPath(const char* path, Volume* volume, char **name, HFSPlusCatalogKey* retKey);
|
||||
HFSPlusCatalogRecord* getRecordFromPath2(const char* path, Volume* volume, char **name, HFSPlusCatalogKey* retKey, char traverse);
|
||||
HFSPlusCatalogRecord* getRecordFromPath3(const char* path, Volume* volume, char **name, HFSPlusCatalogKey* retKey, char traverse, char returnLink, HFSCatalogNodeID parentID);
|
||||
void releaseCatalogRecordList(CatalogRecordList* list);
|
||||
|
||||
int isBlockUsed(Volume* volume, uint32_t block);
|
||||
int setBlockUsed(Volume* volume, uint32_t block, int used);
|
||||
int allocate(RawFile* rawFile, off_t size);
|
||||
|
||||
void flipForkData(HFSPlusForkData* forkData);
|
||||
|
||||
Volume* openVolume(io_func* io);
|
||||
void closeVolume(Volume *volume);
|
||||
int updateVolume(Volume* volume);
|
||||
|
||||
int debugBTree(BTree* tree, int displayTree);
|
||||
|
||||
int addToBTree(BTree* tree, BTKey* searchKey, size_t length, unsigned char* content);
|
||||
|
||||
int removeFromBTree(BTree* tree, BTKey* searchKey);
|
||||
|
||||
int32_t FastUnicodeCompare ( register uint16_t str1[], register uint16_t length1,
|
||||
register uint16_t str2[], register uint16_t length2);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
5
dump-imessages/iphone-dataprotection/img3fs/Makefile
Normal file
5
dump-imessages/iphone-dataprotection/img3fs/Makefile
Normal file
@ -0,0 +1,5 @@
|
||||
img3fs: img3fs.c
|
||||
gcc -o $@ $^ -Wall -lfuse_ino64 -lcrypto -I/usr/local/include/osxfuse || gcc -o $@ $^ -Wall -losxfuse_i64 -lcrypto -I/usr/local/include/osxfuse
|
||||
|
||||
clean:
|
||||
rm img3fs
|
12
dump-imessages/iphone-dataprotection/img3fs/README
Normal file
12
dump-imessages/iphone-dataprotection/img3fs/README
Normal file
@ -0,0 +1,12 @@
|
||||
FUSE img3 filesystem
|
||||
read/write/encryption support
|
||||
|
||||
Usage example:
|
||||
|
||||
mkdir /tmp/img3
|
||||
img3fs /tmp/img3 038-0032-002.dmg -iv 9b20ae16bebf4cf1b9101374c3ab0095 -key 06849aead2e9a6ca8a82c3929bad5c2368942e3681a3d5751720d2aacf0694c0
|
||||
hdiutil attach /tmp/img3/DATA.dmg
|
||||
rm -rf /Volumes/ramdisk/usr/local/standalone/firmware/*
|
||||
echo "Hello World!" > /Volumes/ramdisk/hello.txt
|
||||
hdiutil eject /Volumes/ramdisk
|
||||
umount /tmp/img3
|
466
dump-imessages/iphone-dataprotection/img3fs/img3fs.c
Normal file
466
dump-imessages/iphone-dataprotection/img3fs/img3fs.c
Normal file
@ -0,0 +1,466 @@
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <stddef.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <string.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
#include <openssl/evp.h>
|
||||
|
||||
#define _FILE_OFFSET_BITS 64
|
||||
#define FUSE_USE_VERSION 26
|
||||
#include <fuse.h>
|
||||
|
||||
#define MAX_IMG3_ELEMENTS 15
|
||||
|
||||
struct img3_config
|
||||
{
|
||||
char *img3filename;
|
||||
char *iv;
|
||||
char *key;
|
||||
};
|
||||
|
||||
#define MYFS_OPT(t, p, v) { t, offsetof(struct img3_config, p), v }
|
||||
|
||||
static struct fuse_opt img3_opts[] = {
|
||||
MYFS_OPT("-iv %s", iv, 0),
|
||||
MYFS_OPT("-key %s", key, 0),
|
||||
FUSE_OPT_END
|
||||
};
|
||||
|
||||
typedef struct IMG3Header
|
||||
{
|
||||
uint32_t magic;
|
||||
uint32_t fullSize;
|
||||
uint32_t sizeNoPack;
|
||||
uint32_t sigCheckArea;
|
||||
uint32_t iden;
|
||||
} __attribute__((packed)) IMG3Header;
|
||||
|
||||
typedef struct IMG3Element
|
||||
{
|
||||
uint32_t magic;
|
||||
uint32_t total_length;
|
||||
uint32_t data_length;
|
||||
uint32_t offset;
|
||||
char name[10];
|
||||
} __attribute__((packed)) IMG3Element;
|
||||
|
||||
typedef struct KBAG
|
||||
{
|
||||
uint32_t cryptState;// 1 if the key and IV in the KBAG are encrypted with the GID-key
|
||||
// 2 is used with a second KBAG for the S5L8920, use is unknown.
|
||||
uint32_t aesType; // 0x80 = aes-128, 0xc0 = aes-192, 0x100 = aes256
|
||||
char EncIV[16];
|
||||
union
|
||||
{
|
||||
char EncKey128[16];
|
||||
char EncKey192[24];
|
||||
char EncKey256[32];
|
||||
} key;
|
||||
} KBAG;
|
||||
|
||||
typedef struct IMG3
|
||||
{
|
||||
int fd;
|
||||
uint8_t* mmap;
|
||||
uint32_t aesType;//0=no aes, 0x80 = aes-128, 0xc0 = aes-192, 0x100 = aes256
|
||||
const EVP_CIPHER* cipherType;
|
||||
uint8_t iv[16];
|
||||
uint8_t key[32];
|
||||
uint8_t* decrypted_data;
|
||||
int data_was_modified;
|
||||
|
||||
uint32_t size;
|
||||
struct IMG3Header header;
|
||||
uint32_t num_elements;
|
||||
struct IMG3Element* data_element;
|
||||
struct IMG3Element elements[MAX_IMG3_ELEMENTS];
|
||||
} IMG3;
|
||||
|
||||
void hexToBytes(const char* hex, uint8_t* buffer, size_t bufferLen) {
|
||||
size_t i;
|
||||
for(i = 0; i < bufferLen && *hex != '\0'; i++) {
|
||||
uint32_t byte;
|
||||
sscanf(hex, "%02x", &byte);
|
||||
buffer[i] = byte;
|
||||
hex += 2;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
Check for known magic constants/strings in the decrypted data to get an idea of whether the IV/key were correct
|
||||
**/
|
||||
char* img3_check_decrypted_data(const uint8_t* buffer, uint32_t len)
|
||||
{
|
||||
if (len > 16 && !strncmp("complzss", buffer, 8))
|
||||
{
|
||||
return "kernelcache";
|
||||
}
|
||||
if (len > 0x800 && !strncmp("H+", &buffer[0x400], 2))
|
||||
{
|
||||
return "ramdisk";
|
||||
}
|
||||
if (len > 0x300 && !strncmp("iBoot", &buffer[0x280], 5))
|
||||
{
|
||||
return "bootloader";
|
||||
}
|
||||
//TODO devicetree, logos
|
||||
return NULL;
|
||||
}
|
||||
|
||||
IMG3* img3_init(struct img3_config* config)
|
||||
{
|
||||
IMG3* img3 = NULL;
|
||||
struct stat st;
|
||||
uint32_t len,offset,i,keylen;
|
||||
|
||||
if(stat(config->img3filename, &st) == -1)
|
||||
{
|
||||
perror("stat");
|
||||
return NULL;
|
||||
}
|
||||
len = st.st_size;
|
||||
|
||||
int fd = open(config->img3filename, O_RDWR);
|
||||
if (fd == -1)
|
||||
{
|
||||
perror("open");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
img3 = malloc(sizeof(IMG3));
|
||||
|
||||
if (img3 == NULL)
|
||||
{
|
||||
perror("malloc");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
img3->fd = fd;
|
||||
img3->size = len;
|
||||
img3->num_elements = 0;
|
||||
img3->aesType = 0;
|
||||
img3->data_was_modified = 0;
|
||||
img3->cipherType = NULL;
|
||||
img3->decrypted_data = NULL;
|
||||
img3->data_element = NULL;
|
||||
img3->mmap = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
|
||||
if (img3->mmap == (void*) -1)
|
||||
{
|
||||
perror("mmap");
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
keylen = 0;
|
||||
if (config->iv != NULL && config->key != NULL)
|
||||
{
|
||||
if(strlen(config->iv) != 32)
|
||||
{
|
||||
printf("IV must be 16 bytes\n");
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
keylen = strlen(config->key);
|
||||
if (keylen != 32 && keylen != 64 && keylen != 48)
|
||||
{
|
||||
printf("Key must be 16,24 or 32 bytes\n");
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
hexToBytes(config->iv, img3->iv, 16);
|
||||
hexToBytes(config->key, img3->key, 32);
|
||||
}
|
||||
|
||||
if(read(fd, &img3->header, sizeof(IMG3Header)) != sizeof(IMG3Header))
|
||||
{
|
||||
perror("read IMG3 header");
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
if(img3->header.magic != 'Img3' || img3->header.fullSize != len)
|
||||
{
|
||||
printf("bad magic or len\n");
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for(offset=sizeof(IMG3Header),i=0; offset < len && i < MAX_IMG3_ELEMENTS; i++)
|
||||
{
|
||||
if(lseek(fd, offset, SEEK_SET) == -1)
|
||||
break;
|
||||
if(read(fd, &img3->elements[i], 12) != 12)
|
||||
break;
|
||||
if(img3->elements[i].total_length < 12)
|
||||
break;
|
||||
if(img3->elements[i].data_length > img3->elements[i].total_length)
|
||||
break;
|
||||
if(offset + img3->elements[i].data_length < offset)
|
||||
break;
|
||||
if(offset + img3->elements[i].total_length < offset)
|
||||
break;
|
||||
if(offset + img3->elements[i].total_length > len)
|
||||
break;
|
||||
img3->elements[i].offset = offset + 12;
|
||||
img3->elements[i].name[0] = (img3->elements[i].magic & 0xff000000) >> 24;
|
||||
img3->elements[i].name[1] = (img3->elements[i].magic & 0xff0000) >> 16;
|
||||
img3->elements[i].name[2] = (img3->elements[i].magic & 0xff00) >> 8;
|
||||
img3->elements[i].name[3] = (img3->elements[i].magic & 0xff);
|
||||
img3->elements[i].name[4] = 0;
|
||||
|
||||
printf("TAG: %s OFFSET %x data_length:%x\n", img3->elements[i].name, offset, img3->elements[i].data_length);
|
||||
|
||||
if (img3->elements[i].magic == 'KBAG')
|
||||
{
|
||||
KBAG* kbag = (KBAG*) &(img3->mmap[offset+12]);
|
||||
if(kbag->cryptState == 1)
|
||||
{
|
||||
if( kbag->aesType != 0x80 && kbag->aesType != 0xC0 && kbag->aesType != 0x100)
|
||||
{
|
||||
printf("Unknown aesType %x\n", kbag->aesType);
|
||||
}
|
||||
else if (keylen*4 != kbag->aesType)
|
||||
{
|
||||
printf("Wrong length for key parameter, got %d, aesType is %x\n", keylen, kbag->aesType);
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("KBAG cryptState=%x aesType=%x\n", kbag->cryptState, kbag->aesType);
|
||||
img3->aesType = kbag->aesType;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (img3->elements[i].magic == 'DATA')
|
||||
{
|
||||
img3->data_element = &img3->elements[i];
|
||||
|
||||
if (img3->header.iden == 'rdsk')
|
||||
{
|
||||
//XXX hdiutil fails if extension is not .dmg
|
||||
strcpy(img3->elements[i].name, "DATA.dmg");
|
||||
}
|
||||
}
|
||||
offset += img3->elements[i].total_length;
|
||||
}
|
||||
img3->num_elements = i;
|
||||
|
||||
if(img3->data_element != NULL && img3->aesType != 0)
|
||||
{
|
||||
img3->decrypted_data = malloc(img3->data_element->data_length);
|
||||
if (img3->decrypted_data == NULL)
|
||||
{
|
||||
perror("FAIL: malloc(img3->data_element->data_length)");
|
||||
free(img3);
|
||||
return NULL;
|
||||
}
|
||||
switch(img3->aesType)
|
||||
{
|
||||
case 0x80:
|
||||
img3->cipherType = EVP_aes_128_cbc();
|
||||
break;
|
||||
case 0xC0:
|
||||
img3->cipherType = EVP_aes_192_cbc();
|
||||
break;
|
||||
case 0x100:
|
||||
img3->cipherType = EVP_aes_256_cbc();
|
||||
break;
|
||||
default:
|
||||
return img3; //should not reach
|
||||
}
|
||||
EVP_CIPHER_CTX ctx;
|
||||
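/* decrypt whole 16-byte AES blocks only: round the DATA payload length
   (total_length minus the 12-byte tag header) down to a multiple of the
   block size */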
uint32_t decryptedLength = (img3->data_element->total_length - 12) & ~0xf;
|
||||
printf("Decrypting DATA section\n");
|
||||
|
||||
EVP_CIPHER_CTX_init(&ctx);
|
||||
EVP_DecryptInit_ex(&ctx, img3->cipherType, NULL, img3->key, img3->iv);
|
||||
EVP_DecryptUpdate(&ctx, img3->decrypted_data, &decryptedLength,
|
||||
&img3->mmap[img3->data_element->offset], decryptedLength);
|
||||
|
||||
char* info = img3_check_decrypted_data(img3->decrypted_data, decryptedLength);
|
||||
if(info != NULL)
|
||||
{
|
||||
printf("Decrypted data seems OK : %s\n", info);
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Unknown decrypted data, key/iv might be wrong\n");
|
||||
}
|
||||
}
|
||||
return img3;
|
||||
}
|
||||
static IMG3* img3 = NULL;
|
||||
|
||||
static void img3_destroy()
|
||||
{
|
||||
if (img3->aesType != 0 && img3->decrypted_data != NULL && img3->data_was_modified)
|
||||
{
|
||||
EVP_CIPHER_CTX ctx;
|
||||
uint32_t encryptedLength = img3->data_element->total_length - 12;
|
||||
//printf("Encrypting DATA section\n");
|
||||
EVP_CIPHER_CTX_init(&ctx);
|
||||
EVP_EncryptInit_ex(&ctx, img3->cipherType, NULL, img3->key, img3->iv);
|
||||
EVP_EncryptUpdate(&ctx, &img3->mmap[img3->data_element->offset], &encryptedLength,
|
||||
img3->decrypted_data, encryptedLength);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
img3_getattr(const char *path, struct stat *stbuf)
|
||||
{
|
||||
int i;
|
||||
memset(stbuf, 0, sizeof(struct stat));
|
||||
|
||||
if(!strcmp(path, "/"))
|
||||
{
|
||||
stbuf->st_mode = S_IFDIR | 0777;
|
||||
stbuf->st_nlink = 3;
|
||||
return 0;
|
||||
}
|
||||
for(i=0; i < img3->num_elements; i++)
|
||||
{
|
||||
if(!strcmp(path+1, img3->elements[i].name))
|
||||
{
|
||||
stbuf->st_mode = S_IFREG | 0666;
|
||||
stbuf->st_nlink = 1;
|
||||
stbuf->st_size = img3->elements[i].data_length;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static int
|
||||
img3_open(const char *path, struct fuse_file_info *fi)
|
||||
{
|
||||
int i;
|
||||
for(i=0; i < img3->num_elements; i++)
|
||||
{
|
||||
if(!strcmp(path+1, img3->elements[i].name))
|
||||
{
|
||||
fi->fh = i;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static int
|
||||
img3_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
|
||||
off_t offset, struct fuse_file_info *fi)
|
||||
{
|
||||
int i;
|
||||
if(strcmp(path, "/"))
|
||||
{
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
filler(buf, ".", NULL, 0);
|
||||
filler(buf, "..", NULL, 0);
|
||||
|
||||
for(i=0; i < img3->num_elements; i++)
|
||||
{
|
||||
filler(buf, img3->elements[i].name, NULL, 0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
img3_read(const char *path, char *buf, size_t size, off_t offset,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
IMG3Element* element = &img3->elements[fi->fh];
|
||||
|
||||
if (offset >= element->data_length) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (offset + size > element->data_length) { /* Trim the read to the file size. */
|
||||
size = element->data_length - offset;
|
||||
}
|
||||
|
||||
if (img3->aesType != 0 && element == img3->data_element)
|
||||
{
|
||||
memcpy(buf, img3->decrypted_data + offset, size);
|
||||
return size;
|
||||
}
|
||||
lseek(img3->fd, element->offset + offset, SEEK_SET);
|
||||
return read(img3->fd, buf, size);
|
||||
}
|
||||
|
||||
static int
|
||||
img3_write(const char *path, const char *buf, size_t size, off_t offset,
|
||||
struct fuse_file_info *fi)
|
||||
{
|
||||
IMG3Element* element = &img3->elements[fi->fh];
|
||||
|
||||
if (offset >= element->data_length) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (offset + size > element->data_length) { /* Trim the write to the file size. */
|
||||
size = element->data_length - offset;
|
||||
}
|
||||
|
||||
if (img3->aesType != 0 && element == img3->data_element)
|
||||
{
|
||||
img3->data_was_modified = 1;
|
||||
memcpy(img3->decrypted_data + offset, buf, size);
|
||||
return size;
|
||||
}
|
||||
|
||||
lseek(img3->fd, element->offset + offset, SEEK_SET);
|
||||
return write(img3->fd, buf, size);
|
||||
}
|
||||
|
||||
|
||||
static struct fuse_operations img3_filesystem_operations = {
|
||||
.getattr = img3_getattr,
|
||||
.open = img3_open,
|
||||
.read = img3_read,
|
||||
.write = img3_write,
|
||||
.readdir = img3_readdir,
|
||||
.destroy = img3_destroy
|
||||
};
|
||||
|
||||
|
||||
static int img3_opt_proc(void *data, const char *arg, int key, struct fuse_args *outargs)
|
||||
{
|
||||
static int i = -1;
|
||||
struct img3_config* config = (struct img3_config*) data;
|
||||
|
||||
i++;
|
||||
if (key == FUSE_OPT_KEY_NONOPT && i == 1)
|
||||
{
|
||||
config->img3filename = strdup(arg);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
|
||||
struct img3_config commandline_conf;
|
||||
memset(&commandline_conf, 0, sizeof(commandline_conf));
|
||||
|
||||
fuse_opt_parse(&args, &commandline_conf, img3_opts, img3_opt_proc);
|
||||
|
||||
if (commandline_conf.img3filename == NULL)
|
||||
{
|
||||
printf("Usage %s mountpoint img3filename [-key KEY -iv IV]\n", argv[0]);
|
||||
return -1;
|
||||
}
|
||||
img3 = img3_init(&commandline_conf);
|
||||
if (img3 != NULL)
|
||||
{
|
||||
return fuse_main(args.argc, args.argv, &img3_filesystem_operations, NULL); /* user_data is unused */
|
||||
}
|
||||
return 0;
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
Dependencies
|
||||
pycrypto (https://www.dlitz.net/software/pycrypto/)
|
||||
construct (http://construct.wikispaces.com/)
|
@ -0,0 +1,78 @@
|
||||
from backups.backup3 import decrypt_backup3
|
||||
from backups.backup4 import MBDB
|
||||
from keystore.keybag import Keybag
|
||||
from util import readPlist, makedirs
|
||||
import os
|
||||
import sys
|
||||
import plistlib
|
||||
|
||||
showinfo = ["Device Name", "Display Name", "Last Backup Date", "IMEI",
|
||||
"Serial Number", "Product Type", "Product Version", "iTunes Version"]
|
||||
|
||||
def extract_backup(backup_path, output_path, password=""):
|
||||
if not os.path.exists(backup_path + "/Manifest.plist"):
|
||||
print "Manifest.plist not found"
|
||||
return
|
||||
manifest = readPlist(backup_path + "/Manifest.plist")
|
||||
|
||||
info = readPlist( backup_path + "/Info.plist")
|
||||
for i in showinfo:
|
||||
print i + " : " + unicode(info.get(i, "missing"))
|
||||
|
||||
print "Extract backup to %s ? (y/n)" % output_path
|
||||
if raw_input() == "n":
|
||||
return
|
||||
|
||||
print "Backup is %sencrypted" % (int(not manifest["IsEncrypted"]) * "not ")
|
||||
|
||||
if manifest["IsEncrypted"] and password == "":
|
||||
print "Enter backup password : "
|
||||
password = raw_input()
|
||||
|
||||
if not manifest.has_key("BackupKeyBag"):
|
||||
print "No BackupKeyBag in manifest, assuming iOS 3.x backup"
|
||||
decrypt_backup3(backup_path, output_path, password)
|
||||
else:
|
||||
mbdb = MBDB(backup_path)
|
||||
|
||||
kb = Keybag.createWithBackupManifest(manifest, password)
|
||||
if not kb:
|
||||
return
|
||||
manifest["password"] = password
|
||||
makedirs(output_path)
|
||||
plistlib.writePlist(manifest, output_path + "/Manifest.plist")
|
||||
|
||||
mbdb.keybag = kb
|
||||
mbdb.extract_backup(output_path)
|
||||
|
||||
print "You can decrypt the keychain using the following command : "
|
||||
print "python keychain_tool.py -d \"%s\" \"%s\"" % (output_path + "/KeychainDomain/keychain-backup.plist", output_path + "/Manifest.plist")
|
||||
|
||||
def extract_all():
|
||||
if sys.platform == "win32":
|
||||
mobilesync = os.environ["APPDATA"] + "/Apple Computer/MobileSync/Backup/"
|
||||
elif sys.platform == "darwin":
|
||||
mobilesync = os.environ["HOME"] + "/Library/Application Support/MobileSync/Backup/"
|
||||
else:
|
||||
print "Unsupported operating system"
|
||||
return
|
||||
print "-" * 60
|
||||
print "Searching for iTunes backups"
|
||||
print "-" * 60
|
||||
|
||||
for udid in os.listdir(mobilesync):
|
||||
extract_backup(mobilesync + "/" + udid, udid + "_extract")
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 2:
|
||||
print "Usage: %s <backup path> [output path]" % sys.argv[0]
|
||||
return
|
||||
backup_path = sys.argv[1]
|
||||
output_path = os.path.dirname(backup_path) + "_extract"
|
||||
if len(sys.argv) >= 3:
|
||||
output_path = sys.argv[2]
|
||||
|
||||
extract_backup(backup_path, output_path)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
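# A minimal sketch (not part of the original script) of driving the extractor
# from another Python module instead of the command line; the module name
# backup_tool and the paths below are illustrative assumptions only.
#
#   from backup_tool import extract_backup
#   extract_backup("/path/to/MobileSync/Backup/<udid>", "/tmp/backup_extract",
#                  password="secret")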
@ -0,0 +1,67 @@
|
||||
from crypto.PBKDF2 import PBKDF2
|
||||
from crypto.aes import AESdecryptCBC
|
||||
from util import read_file, write_file, makedirs, readPlist
|
||||
from util.bplist import BPlistReader
|
||||
import hashlib
|
||||
import struct
|
||||
import glob
|
||||
import sys
|
||||
import os
|
||||
|
||||
"""
|
||||
decrypt iOS 3 backup blob (metadata and file contents)
|
||||
"""
|
||||
|
||||
def decrypt_blob(blob, auth_key):
|
||||
blob_len = struct.unpack(">H", blob[0:2])[0]
|
||||
if blob_len != 66:
|
||||
print "blob len != 66"
|
||||
magic = struct.unpack(">H", blob[2:4])[0]
|
||||
if magic != 0x0100:
|
||||
print "magic != 0x0100"
|
||||
iv = blob[4:20]
|
||||
|
||||
blob_key = AESdecryptCBC(blob[20:68], auth_key, iv)[:32]
|
||||
|
||||
return AESdecryptCBC(blob[68:], blob_key, iv, padding=True)
|
||||
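# Blob layout handled by decrypt_blob above:
#   bytes 0-1    big-endian length field, expected to be 66
#   bytes 2-3    magic, expected 0x0100
#   bytes 4-19   AES-CBC IV
#   bytes 20-67  wrapped blob key, unwrapped with the backup auth key
#   bytes 68-    payload, decrypted with the unwrapped blob key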
|
||||
def decrypt_backup3(backupfolder, outputfolder, passphrase):
|
||||
auth_key = None
|
||||
manifest = readPlist(backupfolder + "/Manifest.plist")
|
||||
|
||||
if manifest["IsEncrypted"]:
|
||||
manifest_data = manifest["Data"].data
|
||||
|
||||
authdata = manifest["AuthData"].data
|
||||
|
||||
pbkdf_salt = authdata[:8]
|
||||
iv = authdata[8:24]
|
||||
key = PBKDF2(passphrase, pbkdf_salt, iterations=2000).read(32)
|
||||
|
||||
data = AESdecryptCBC(authdata[24:], key, iv)
|
||||
auth_key = data[:32]
|
||||
|
||||
if hashlib.sha1(auth_key).digest() != data[32:52]:
|
||||
print "wrong auth key (hash mismatch) => wrong passphrase"
|
||||
return
|
||||
|
||||
print "Passphrase seems OK"
|
||||
|
||||
for mdinfo_name in glob.glob(backupfolder + "/*.mdinfo"):
|
||||
|
||||
mddata_name = mdinfo_name[:-7] + ".mddata"
|
||||
mdinfo = readPlist(mdinfo_name)
|
||||
metadata = mdinfo["Metadata"].data
|
||||
if mdinfo["IsEncrypted"]:
|
||||
metadata = decrypt_blob(metadata, auth_key)
|
||||
metadata = BPlistReader.plistWithString(metadata)
|
||||
|
||||
print metadata["Path"]
|
||||
|
||||
filedata = read_file(mddata_name)
|
||||
if mdinfo["IsEncrypted"]:
|
||||
filedata = decrypt_blob(filedata, auth_key)
|
||||
|
||||
filename = metadata["Path"]
|
||||
makedirs(outputfolder + "/" + os.path.dirname(filename))
|
||||
write_file(outputfolder + "/" + filename, filedata)
|
@ -0,0 +1,174 @@
|
||||
from Crypto.Cipher import AES
|
||||
from hashlib import sha1
|
||||
from struct import unpack
|
||||
import os
|
||||
|
||||
MBDB_SIGNATURE = 'mbdb\x05\x00'
|
||||
|
||||
MASK_SYMBOLIC_LINK = 0xa000
|
||||
MASK_REGULAR_FILE = 0x8000
|
||||
MASK_DIRECTORY = 0x4000
|
||||
|
||||
def warn(msg):
|
||||
print "WARNING: %s" % msg
|
||||
|
||||
class MBFileRecord(object):
|
||||
def __init__(self, mbdb):
|
||||
self.domain = self._decode_string(mbdb)
|
||||
if self.domain is None:
|
||||
warn("Domain name missing from record")
|
||||
|
||||
self.path = self._decode_string(mbdb)
|
||||
if self.path is None:
|
||||
warn("Relative path missing from record")
|
||||
|
||||
self.target = self._decode_string(mbdb)  # for symbolic links
|
||||
|
||||
self.digest = self._decode_string(mbdb)
|
||||
self.encryption_key = self._decode_data(mbdb)
|
||||
|
||||
data = mbdb.read(40) # metadata, fixed size
|
||||
|
||||
self.mode, = unpack('>H', data[0:2])
|
||||
if not(self.is_regular_file() or self.is_symbolic_link() or self.is_directory()):
|
||||
print self.mode
|
||||
warn("File type mising from record mode")
|
||||
|
||||
if self.is_symbolic_link() and self.target is None:
|
||||
warn("Target required for symblolic links")
|
||||
|
||||
self.inode_number, = unpack('>Q', data[2:10])
|
||||
self.user_id, = unpack('>I', data[10:14])
|
||||
self.group_id, = unpack('>I', data[14:18])
|
||||
self.last_modification_time, = unpack('>i', data[18:22])
|
||||
self.last_status_change_time, = unpack('>i', data[22:26])
|
||||
self.birth_time, = unpack('>i', data[26:30])
|
||||
self.size, = unpack('>q', data[30:38])
|
||||
|
||||
if self.size != 0 and not self.is_regular_file():
|
||||
warn("Non-zero size for a record which is not a regular file")
|
||||
|
||||
self.protection_class = ord(data[38])
|
||||
|
||||
num_attributes = ord(data[39])
|
||||
if num_attributes == 0:
|
||||
self.extended_attributes = None
|
||||
else:
|
||||
self.extended_attributes = {}
|
||||
for i in xrange(num_attributes):
|
||||
k = self._decode_string(mbdb)
|
||||
v = self._decode_data(mbdb)
|
||||
self.extended_attributes[k] = v
|
||||
|
||||
def _decode_string(self, s):
|
||||
s_len, = unpack('>H', s.read(2))
|
||||
if s_len == 0xffff:
|
||||
return None
|
||||
return s.read(s_len)
|
||||
|
||||
def _decode_data(self, s):
|
||||
return self._decode_string(s)
|
||||
|
||||
def type(self):
|
||||
return self.mode & 0xf000
|
||||
|
||||
def is_symbolic_link(self):
|
||||
return self.type() == MASK_SYMBOLIC_LINK
|
||||
|
||||
def is_regular_file(self):
|
||||
return self.type() == MASK_REGULAR_FILE
|
||||
|
||||
def is_directory(self):
|
||||
return self.type() == MASK_DIRECTORY
|
||||
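# Manifest.mbdb record layout, as decoded by MBFileRecord above: five
# length-prefixed strings (domain, relative path, link target, data hash,
# encryption key; a length of 0xffff means the field is absent), then 40 bytes
# of fixed metadata (mode, inode, uid, gid, mtime, ctime, birth time, size,
# protection class, attribute count), followed by that many key/value pairs.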
|
||||
class MBDB(object):
|
||||
def __init__(self, path):
|
||||
self.files = {}
|
||||
self.backup_path = path
|
||||
self.keybag = None
|
||||
# open the database
|
||||
mbdb = file(path + '/Manifest.mbdb', 'rb')
|
||||
|
||||
# read and verify the mbdb signature
|
||||
signature = mbdb.read(len(MBDB_SIGNATURE))
|
||||
if signature != MBDB_SIGNATURE:
|
||||
raise Exception("Bad mbdb signature")
|
||||
try:
|
||||
while True:
|
||||
rec = MBFileRecord(mbdb)
|
||||
fn = rec.domain + "-" + rec.path
|
||||
sb = sha1(fn).digest().encode('hex')
|
||||
if len(sb) % 2 == 1:
|
||||
sb = '0'+sb
|
||||
self.files[sb] = rec
|
||||
except:
|
||||
mbdb.close()
|
||||
|
||||
def get_file_by_name(self, filename):
|
||||
for (k, v) in self.files.iteritems():
|
||||
if v.path == filename:
|
||||
return (k, v)
|
||||
return None
|
||||
|
||||
def extract_backup(self, output_path):
|
||||
for record in self.files.values():
|
||||
# create directories if they do not exist
|
||||
# makedirs raises an exception if the directory already exists; errors are simply ignored here
|
||||
if record.is_directory():
|
||||
try:
|
||||
os.makedirs(os.path.join(output_path, record.domain, record.path))
|
||||
except:
|
||||
pass
|
||||
|
||||
for (filename, record) in self.files.items():
|
||||
# skip directories
|
||||
if record.is_directory():
|
||||
continue
|
||||
self.extract_file(filename, record, output_path)
|
||||
|
||||
def extract_file(self, filename, record, output_path):
|
||||
# adjust output file name
|
||||
if record.is_symbolic_link():
|
||||
out_file = record.target
|
||||
else:
|
||||
out_file = record.path
|
||||
|
||||
# read backup file
|
||||
try:
|
||||
f1 = file(os.path.join(self.backup_path, filename), 'rb')
|
||||
except(IOError):
|
||||
warn("File %s (%s) has not been found" % (filename, record.path))
|
||||
return
|
||||
|
||||
# write output file
|
||||
output_path = os.path.join(output_path, record.domain, out_file)
|
||||
print("Writing %s" % output_path)
|
||||
f2 = file(output_path, 'wb')
|
||||
|
||||
aes = None
|
||||
|
||||
if record.encryption_key is not None and self.keybag: # file is encrypted!
|
||||
key = self.keybag.unwrapKeyForClass(record.protection_class, record.encryption_key[4:])
|
||||
if not key:
|
||||
warn("Cannot unwrap key")
|
||||
return
|
||||
aes = AES.new(key, AES.MODE_CBC, "\x00"*16)
|
||||
|
||||
while True:
|
||||
data = f1.read(8192)
|
||||
if not data:
|
||||
break
|
||||
if aes:
|
||||
data2 = data = aes.decrypt(data)
|
||||
f2.write(data)
|
||||
|
||||
f1.close()
|
||||
if aes:
|
||||
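# the last decrypted byte gives the pad length (1-16, PKCS#7 style);
# only strip it if the tail really repeats that byte that many times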
c = data2[-1]
|
||||
i = ord(c)
|
||||
if i < 17 and data2.endswith(c*i):
|
||||
f2.truncate(f2.tell() - i)
|
||||
else:
|
||||
warn("Bad padding, last byte = 0x%x !" % i)
|
||||
|
||||
f2.close()
|
@ -0,0 +1,354 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: ascii -*-
|
||||
###########################################################################
|
||||
# PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation
|
||||
#
|
||||
# Copyright (C) 2007, 2008 Dwayne C. Litzenberger <dlitz@dlitz.net>
|
||||
# All rights reserved.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose and without fee is hereby granted,
|
||||
# provided that the above copyright notice appear in all copies and that
|
||||
# both that copyright notice and this permission notice appear in
|
||||
# supporting documentation.
|
||||
#
|
||||
# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
|
||||
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Country of origin: Canada
|
||||
#
|
||||
###########################################################################
|
||||
# Sample PBKDF2 usage:
|
||||
# from Crypto.Cipher import AES
|
||||
# from PBKDF2 import PBKDF2
|
||||
# import os
|
||||
#
|
||||
# salt = os.urandom(8) # 64-bit salt
|
||||
# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
|
||||
# iv = os.urandom(16) # 128-bit IV
|
||||
# cipher = AES.new(key, AES.MODE_CBC, iv)
|
||||
# ...
|
||||
#
|
||||
# Sample crypt() usage:
|
||||
# from PBKDF2 import crypt
|
||||
# pwhash = crypt("secret")
|
||||
# alleged_pw = raw_input("Enter password: ")
|
||||
# if pwhash == crypt(alleged_pw, pwhash):
|
||||
# print "Password good"
|
||||
# else:
|
||||
# print "Invalid password"
|
||||
#
|
||||
###########################################################################
|
||||
# History:
|
||||
#
|
||||
# 2007-07-27 Dwayne C. Litzenberger <dlitz@dlitz.net>
|
||||
# - Initial Release (v1.0)
|
||||
#
|
||||
# 2007-07-31 Dwayne C. Litzenberger <dlitz@dlitz.net>
|
||||
# - Bugfix release (v1.1)
|
||||
# - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor
|
||||
# function in the previous release) silently truncates all keys to 64
|
||||
# bytes. The way it was used in the previous release, this would only be
|
||||
# problem if the pseudorandom function that returned values larger than
|
||||
# 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like
|
||||
# anything that silently reduces the security margin from what is
|
||||
# expected.
|
||||
#
|
||||
# 2008-06-17 Dwayne C. Litzenberger <dlitz@dlitz.net>
|
||||
# - Compatibility release (v1.2)
|
||||
# - Add support for older versions of Python (2.2 and 2.3).
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
__version__ = "1.2"
|
||||
|
||||
from struct import pack
|
||||
from binascii import b2a_hex
|
||||
from random import randint
|
||||
import string
|
||||
|
||||
try:
|
||||
# Use PyCrypto (if available)
|
||||
from Crypto.Hash import HMAC, SHA as SHA1
|
||||
|
||||
except ImportError:
|
||||
# PyCrypto not available. Use the Python standard library.
|
||||
import hmac as HMAC
|
||||
import sha as SHA1
|
||||
|
||||
def strxor(a, b):
|
||||
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
|
||||
|
||||
def b64encode(data, chars="+/"):
|
||||
tt = string.maketrans("+/", chars)
|
||||
return data.encode('base64').replace("\n", "").translate(tt)
|
||||
|
||||
class PBKDF2(object):
|
||||
"""PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation
|
||||
|
||||
This implementation takes a passphrase and a salt (and optionally an
|
||||
iteration count, a digest module, and a MAC module) and provides a
|
||||
file-like object from which an arbitrarily-sized key can be read.
|
||||
|
||||
If the passphrase and/or salt are unicode objects, they are encoded as
|
||||
UTF-8 before they are processed.
|
||||
|
||||
The idea behind PBKDF2 is to derive a cryptographic key from a
|
||||
passphrase and a salt.
|
||||
|
||||
PBKDF2 may also be used as a strong salted password hash. The
|
||||
'crypt' function is provided for that purpose.
|
||||
|
||||
Remember: Keys generated using PBKDF2 are only as strong as the
|
||||
passphrases they are derived from.
|
||||
"""
|
||||
|
||||
def __init__(self, passphrase, salt, iterations=1000,
|
||||
digestmodule=SHA1, macmodule=HMAC):
|
||||
self.__macmodule = macmodule
|
||||
self.__digestmodule = digestmodule
|
||||
self._setup(passphrase, salt, iterations, self._pseudorandom)
|
||||
|
||||
def _pseudorandom(self, key, msg):
|
||||
"""Pseudorandom function. e.g. HMAC-SHA1"""
|
||||
return self.__macmodule.new(key=key, msg=msg,
|
||||
digestmod=self.__digestmodule).digest()
|
||||
|
||||
def read(self, bytes):
|
||||
"""Read the specified number of key bytes."""
|
||||
if self.closed:
|
||||
raise ValueError("file-like object is closed")
|
||||
|
||||
size = len(self.__buf)
|
||||
blocks = [self.__buf]
|
||||
i = self.__blockNum
|
||||
while size < bytes:
|
||||
i += 1
|
||||
if i > 0xffffffffL or i < 1:
|
||||
# We could return "" here, but
|
||||
raise OverflowError("derived key too long")
|
||||
block = self.__f(i)
|
||||
blocks.append(block)
|
||||
size += len(block)
|
||||
buf = "".join(blocks)
|
||||
retval = buf[:bytes]
|
||||
self.__buf = buf[bytes:]
|
||||
self.__blockNum = i
|
||||
return retval
|
||||
|
||||
def __f(self, i):
|
||||
# i must fit within 32 bits
|
||||
assert 1 <= i <= 0xffffffffL
|
||||
U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
|
||||
result = U
|
||||
for j in xrange(2, 1+self.__iterations):
|
||||
U = self.__prf(self.__passphrase, U)
|
||||
result = strxor(result, U)
|
||||
return result
|
||||
|
||||
def hexread(self, octets):
|
||||
"""Read the specified number of octets. Return them as hexadecimal.
|
||||
|
||||
Note that len(obj.hexread(n)) == 2*n.
|
||||
"""
|
||||
return b2a_hex(self.read(octets))
|
||||
|
||||
def _setup(self, passphrase, salt, iterations, prf):
|
||||
# Sanity checks:
|
||||
|
||||
# passphrase and salt must be str or unicode (in the latter
|
||||
# case, we convert to UTF-8)
|
||||
if isinstance(passphrase, unicode):
|
||||
passphrase = passphrase.encode("UTF-8")
|
||||
if not isinstance(passphrase, str):
|
||||
raise TypeError("passphrase must be str or unicode")
|
||||
if isinstance(salt, unicode):
|
||||
salt = salt.encode("UTF-8")
|
||||
if not isinstance(salt, str):
|
||||
raise TypeError("salt must be str or unicode")
|
||||
|
||||
# iterations must be an integer >= 1
|
||||
if not isinstance(iterations, (int, long)):
|
||||
raise TypeError("iterations must be an integer")
|
||||
if iterations < 1:
|
||||
raise ValueError("iterations must be at least 1")
|
||||
|
||||
# prf must be callable
|
||||
if not callable(prf):
|
||||
raise TypeError("prf must be callable")
|
||||
|
||||
self.__passphrase = passphrase
|
||||
self.__salt = salt
|
||||
self.__iterations = iterations
|
||||
self.__prf = prf
|
||||
self.__blockNum = 0
|
||||
self.__buf = ""
|
||||
self.closed = False
|
||||
|
||||
def close(self):
|
||||
"""Close the stream."""
|
||||
if not self.closed:
|
||||
del self.__passphrase
|
||||
del self.__salt
|
||||
del self.__iterations
|
||||
del self.__prf
|
||||
del self.__blockNum
|
||||
del self.__buf
|
||||
self.closed = True
|
||||
|
||||
def crypt(word, salt=None, iterations=None):
|
||||
"""PBKDF2-based unix crypt(3) replacement.
|
||||
|
||||
The number of iterations specified in the salt overrides the 'iterations'
|
||||
parameter.
|
||||
|
||||
The effective hash length is 192 bits.
|
||||
"""
|
||||
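# Resulting strings look like $p5k2$<iterations-hex>$<salt>$<base64 hash>;
# an empty iterations field stands for the default of 400 rounds.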
|
||||
# Generate a (pseudo-)random salt if the user hasn't provided one.
|
||||
if salt is None:
|
||||
salt = _makesalt()
|
||||
|
||||
# salt must be a string or the us-ascii subset of unicode
|
||||
if isinstance(salt, unicode):
|
||||
salt = salt.encode("us-ascii")
|
||||
if not isinstance(salt, str):
|
||||
raise TypeError("salt must be a string")
|
||||
|
||||
# word must be a string or unicode (in the latter case, we convert to UTF-8)
|
||||
if isinstance(word, unicode):
|
||||
word = word.encode("UTF-8")
|
||||
if not isinstance(word, str):
|
||||
raise TypeError("word must be a string or unicode")
|
||||
|
||||
# Try to extract the real salt and iteration count from the salt
|
||||
if salt.startswith("$p5k2$"):
|
||||
(iterations, salt, dummy) = salt.split("$")[2:5]
|
||||
if iterations == "":
|
||||
iterations = 400
|
||||
else:
|
||||
converted = int(iterations, 16)
|
||||
if iterations != "%x" % converted: # lowercase hex, minimum digits
|
||||
raise ValueError("Invalid salt")
|
||||
iterations = converted
|
||||
if not (iterations >= 1):
|
||||
raise ValueError("Invalid salt")
|
||||
|
||||
# Make sure the salt matches the allowed character set
|
||||
allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
|
||||
for ch in salt:
|
||||
if ch not in allowed:
|
||||
raise ValueError("Illegal character %r in salt" % (ch,))
|
||||
|
||||
if iterations is None or iterations == 400:
|
||||
iterations = 400
|
||||
salt = "$p5k2$$" + salt
|
||||
else:
|
||||
salt = "$p5k2$%x$%s" % (iterations, salt)
|
||||
rawhash = PBKDF2(word, salt, iterations).read(24)
|
||||
return salt + "$" + b64encode(rawhash, "./")
|
||||
|
||||
# Add crypt as a static method of the PBKDF2 class
|
||||
# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
|
||||
# crypt.
|
||||
PBKDF2.crypt = staticmethod(crypt)
|
||||
|
||||
def _makesalt():
|
||||
"""Return a 48-bit pseudorandom salt for crypt().
|
||||
|
||||
This function is not suitable for generating cryptographic secrets.
|
||||
"""
|
||||
binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)])
|
||||
return b64encode(binarysalt, "./")
|
||||
|
||||
def test_pbkdf2():
|
||||
"""Module self-test"""
|
||||
from binascii import a2b_hex
|
||||
|
||||
#
|
||||
# Test vectors from RFC 3962
|
||||
#
|
||||
|
||||
# Test 1
|
||||
result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16)
|
||||
expected = a2b_hex("cdedb5281bb2f801565a1122b2563515")
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
# Test 2
|
||||
result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32)
|
||||
expected = ("5c08eb61fdf71e4e4ec3cf6ba1f5512b"
|
||||
"a7e52ddbc5e5142f708a31e2e62b1e13")
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
# Test 3
|
||||
result = PBKDF2("X"*64, "pass phrase equals block size", 1200).hexread(32)
|
||||
expected = ("139c30c0966bc32ba55fdbf212530ac9"
|
||||
"c5ec59f1a452f5cc9ad940fea0598ed1")
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
# Test 4
|
||||
result = PBKDF2("X"*65, "pass phrase exceeds block size", 1200).hexread(32)
|
||||
expected = ("9ccad6d468770cd51b10e6a68721be61"
|
||||
"1a8b4d282601db3b36be9246915ec82a")
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
#
|
||||
# Other test vectors
|
||||
#
|
||||
|
||||
# Chunked read
|
||||
f = PBKDF2("kickstart", "workbench", 256)
|
||||
result = f.read(17)
|
||||
result += f.read(17)
|
||||
result += f.read(1)
|
||||
result += f.read(2)
|
||||
result += f.read(3)
|
||||
expected = PBKDF2("kickstart", "workbench", 256).read(40)
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
#
|
||||
# crypt() test vectors
|
||||
#
|
||||
|
||||
# crypt 1
|
||||
result = crypt("cloadm", "exec")
|
||||
expected = '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql'
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
# crypt 2
|
||||
result = crypt("gnu", '$p5k2$c$u9HvcT4d$.....')
|
||||
expected = '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g'
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
# crypt 3
|
||||
result = crypt("dcl", "tUsch7fU", iterations=13)
|
||||
expected = "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL"
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
# crypt 4 (unicode)
|
||||
result = crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2',
|
||||
'$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ')
|
||||
expected = '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ'
|
||||
if result != expected:
|
||||
raise RuntimeError("self-test failed")
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_pbkdf2()
|
||||
|
||||
# vim:set ts=4 sw=4 sts=4 expandtab:
|
@ -0,0 +1,26 @@
|
||||
from Crypto.Cipher import AES
|
||||
|
||||
ZEROIV = "\x00"*16
|
||||
def removePadding(blocksize, s):
|
||||
'Remove rfc 1423 padding from string.'
|
||||
n = ord(s[-1]) # last byte contains number of padding bytes
|
||||
if n > blocksize or n > len(s):
|
||||
raise Exception('invalid padding')
|
||||
return s[:-n]
|
||||
|
||||
|
||||
def AESdecryptCBC(data, key, iv=ZEROIV, padding=False):
|
||||
if len(data) % 16:
|
||||
print "AESdecryptCBC: data length not /16, truncating"
|
||||
data = data[0:(len(data)/16) * 16]
|
||||
data = AES.new(key, AES.MODE_CBC, iv).decrypt(data)
|
||||
if padding:
|
||||
return removePadding(16, data)
|
||||
return data
|
||||
|
||||
def AESencryptCBC(data, key, iv=ZEROIV, padding=False):
|
||||
if len(data) % 16:
|
||||
print "AESdecryptCBC: data length not /16, truncating"
|
||||
data = data[0:(len(data)/16) * 16]
|
||||
data = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
|
||||
return data
|
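# A small self-check, not part of the original module: round-trip two AES
# blocks through the helpers above with a throwaway all-zero key and the
# default zero IV.
if __name__ == "__main__":
    plaintext = "A" * 32
    key = "\x00" * 16
    assert AESdecryptCBC(AESencryptCBC(plaintext, key), key) == plaintext
    print "AES CBC round-trip OK"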
@ -0,0 +1,70 @@
|
||||
import struct
|
||||
from Crypto.Cipher import AES
|
||||
|
||||
"""
|
||||
http://www.ietf.org/rfc/rfc3394.txt
|
||||
quick'n'dirty AES wrap implementation
|
||||
used by iOS 4 KeyStore kernel extension for wrapping/unwrapping encryption keys
|
||||
"""
|
||||
def unpack64bit(s):
|
||||
return struct.unpack(">Q",s)[0]
|
||||
def pack64bit(s):
|
||||
return struct.pack(">Q",s)
|
||||
|
||||
def AESUnwrap(kek, wrapped):
|
||||
C = []
|
||||
for i in xrange(len(wrapped)/8):
|
||||
C.append(unpack64bit(wrapped[i*8:i*8+8]))
|
||||
n = len(C) - 1
|
||||
R = [0] * (n+1)
|
||||
A = C[0]
|
||||
|
||||
for i in xrange(1,n+1):
|
||||
R[i] = C[i]
|
||||
|
||||
for j in reversed(xrange(0,6)):
|
||||
for i in reversed(xrange(1,n+1)):
|
||||
todec = pack64bit(A ^ (n*j+i))
|
||||
todec += pack64bit(R[i])
|
||||
B = AES.new(kek).decrypt(todec)
|
||||
A = unpack64bit(B[:8])
|
||||
R[i] = unpack64bit(B[8:])
|
||||
|
||||
#assert A == 0xa6a6a6a6a6a6a6a6, "AESUnwrap: integrity check FAIL, wrong kek ?"
|
||||
if A != 0xa6a6a6a6a6a6a6a6:
|
||||
#print "AESUnwrap: integrity check FAIL, wrong kek ?"
|
||||
return None
|
||||
res = "".join(map(pack64bit, R[1:]))
|
||||
return res
|
||||
|
||||
def AESwrap(kek, data):
|
||||
A = 0xa6a6a6a6a6a6a6a6
|
||||
R = [0]
|
||||
for i in xrange(len(data)/8):
|
||||
R.append(unpack64bit(data[i*8:i*8+8]))
|
||||
n = len(R) - 1
|
||||
|
||||
for j in xrange(0,6):
|
||||
for i in xrange(1,n+1):
|
||||
B = AES.new(kek).encrypt(pack64bit(A) + pack64bit(R[i]))
|
||||
A = unpack64bit(B[:8]) ^ (n*j+i)
|
||||
R[i] = unpack64bit(B[8:])
|
||||
|
||||
res = pack64bit(A) + "".join(map(pack64bit, R[1:]))
|
||||
return res
|
||||
|
||||
if __name__ == "__main__":
|
||||
#format (kek, data, expected_ciphertext)
|
||||
test_vectors = [
|
||||
("000102030405060708090A0B0C0D0E0F", "00112233445566778899AABBCCDDEEFF", "1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5"),
|
||||
("000102030405060708090A0B0C0D0E0F1011121314151617", "00112233445566778899AABBCCDDEEFF", "96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D"),
|
||||
("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", "00112233445566778899AABBCCDDEEFF", "64E8C3F9CE0F5BA263E9777905818A2A93C8191E7D6E8AE7"),
|
||||
("000102030405060708090A0B0C0D0E0F1011121314151617", "00112233445566778899AABBCCDDEEFF0001020304050607", "031D33264E15D33268F24EC260743EDCE1C6C7DDEE725A936BA814915C6762D2"),
|
||||
("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", "00112233445566778899AABBCCDDEEFF0001020304050607", "A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1"),
|
||||
("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", "00112233445566778899AABBCCDDEEFF000102030405060708090A0B0C0D0E0F", "28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD21")
|
||||
]
|
||||
for kek, data, expected in test_vectors:
|
||||
ciphertext = AESwrap(kek.decode("hex"), data.decode("hex"))
|
||||
assert ciphertext == expected.decode("hex")
|
||||
assert AESUnwrap(kek.decode("hex"), ciphertext) == data.decode("hex")
|
||||
print "All tests OK !"
|
@ -0,0 +1,74 @@
|
||||
from Crypto.Util import number
|
||||
|
||||
CURVE_P = (2**255 - 19)
|
||||
CURVE_A = 121665
|
||||
|
||||
def curve25519_monty(x1, z1, x2, z2, qmqp):
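# One Montgomery ladder step on curve25519: differential addition of (x1,z1) and
# (x2,z2) plus a doubling of (x1,z1), in projective coordinates mod 2^255 - 19.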
|
||||
a = (x1 + z1) * (x2 - z2) % CURVE_P
|
||||
b = (x1 - z1) * (x2 + z2) % CURVE_P
|
||||
x4 = (a + b) * (a + b) % CURVE_P
|
||||
|
||||
e = (a - b) * (a - b) % CURVE_P
|
||||
z4 = e * qmqp % CURVE_P
|
||||
|
||||
a = (x1 + z1) * (x1 + z1) % CURVE_P
|
||||
b = (x1 - z1) * (x1 - z1) % CURVE_P
|
||||
x3 = a * b % CURVE_P
|
||||
|
||||
g = (a - b) % CURVE_P
|
||||
h = (a + CURVE_A * g) % CURVE_P
|
||||
z3 = (g * h) % CURVE_P
|
||||
|
||||
return x3, z3, x4, z4
|
||||
|
||||
def curve25519_mult(n, q):
|
||||
nqpqx, nqpqz = q, 1
|
||||
nqx, nqz = 1, 0
|
||||
|
||||
for i in range(255, -1, -1):
|
||||
if (n >> i) & 1:
|
||||
nqpqx,nqpqz,nqx,nqz = curve25519_monty(nqpqx, nqpqz, nqx, nqz, q)
|
||||
else:
|
||||
nqx,nqz,nqpqx,nqpqz = curve25519_monty(nqx, nqz, nqpqx, nqpqz, q)
|
||||
return nqx, nqz
|
||||
|
||||
def curve25519(secret, basepoint):
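# Clamp the 32-byte secret per the curve25519 spec (clear the low 3 bits and the
# top bit, set bit 254), run the ladder, then normalize by multiplying x by z^-1.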
|
||||
a = ord(secret[0])
|
||||
a &= 248
|
||||
b = ord(secret[31])
|
||||
b &= 127
|
||||
b |= 64
|
||||
s = chr(a) + secret[1:-1] + chr(b)
|
||||
|
||||
s = number.bytes_to_long(s[::-1])
|
||||
basepoint = number.bytes_to_long(basepoint[::-1])
|
||||
|
||||
x, z = curve25519_mult(s, basepoint)
|
||||
zmone = number.inverse(z, CURVE_P)
|
||||
z = x * zmone % CURVE_P
|
||||
return number.long_to_bytes(z)[::-1]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from crypto.aeswrap import AESUnwrap
|
||||
from Crypto.Hash import SHA256
|
||||
|
||||
z="04000000080000000200000048000000000000000000000000000000000000000000000002917dc2542198edeb1078c4d1ebab74d9ca87890657ba02b9825dadf20a002f44360c6f87743fac0236df1f9eedbea801e31677aef3a09adfb4e10a37ae27facf419ab3ea3f39f4".decode("hex")
|
||||
|
||||
mysecret = "99b66345829d8c05041eea1ba1ed5b2984c3e5ec7a756ef053473c7f22b49f14".decode("hex")
|
||||
mypublic = "b1c652786697a5feef36a56f36fde524a21193f4e563627977ab515f600fdb3a".decode("hex")
|
||||
hispublic = z[36:36+32]
|
||||
|
||||
#c4d9fe462a2ebbf0745195ce7dc5e8b49947bbd5b42da74175d5f8125b44582b
|
||||
shared = curve25519(mysecret, hispublic)
|
||||
print shared.encode("hex")
|
||||
|
||||
h = SHA256.new()
|
||||
h.update('\x00\x00\x00\x01')
|
||||
h.update(shared)
|
||||
h.update(hispublic)
|
||||
h.update(mypublic)
|
||||
md = h.digest()
|
||||
|
||||
#e442c81b91ea876d3cf42d3aea75f4b0c3f90f9fd045e1f5784b91260f3bdc9c
|
||||
print AESUnwrap(md, z[32+36:]).encode("hex")
|
@ -0,0 +1,129 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util import strxor
|
||||
from struct import pack, unpack
|
||||
|
||||
def gcm_rightshift(vec):
|
||||
for x in range(15, 0, -1):
|
||||
c = vec[x] >> 1
|
||||
c |= (vec[x-1] << 7) & 0x80
|
||||
vec[x] = c
|
||||
vec[0] >>= 1
|
||||
return vec
|
||||
|
||||
def gcm_gf_mult(a, b):
|
||||
mask = [ 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 ]
|
||||
poly = [ 0x00, 0xe1 ]
|
||||
|
||||
Z = [0] * 16
|
||||
V = [c for c in a]
|
||||
|
||||
for x in range(128):
|
||||
if b[x >> 3] & mask[x & 7]:
|
||||
Z = [V[y] ^ Z[y] for y in range(16)]
|
||||
bit = V[15] & 1
|
||||
V = gcm_rightshift(V)
|
||||
V[0] ^= poly[bit]
|
||||
return Z
|
||||
|
||||
def ghash(h, auth_data, data):
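# GHASH: zero-pad the auth data and ciphertext to 16-byte blocks, append their
# bit lengths, and fold every block through GF(2^128) multiplication by H.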
|
||||
u = (16 - len(data)) % 16
|
||||
v = (16 - len(auth_data)) % 16
|
||||
|
||||
x = auth_data + chr(0) * v + data + chr(0) * u
|
||||
x += pack('>QQ', len(auth_data) * 8, len(data) * 8)
|
||||
|
||||
y = [0] * 16
|
||||
vec_h = [ord(c) for c in h]
|
||||
|
||||
for i in range(0, len(x), 16):
|
||||
block = [ord(c) for c in x[i:i+16]]
|
||||
y = [y[j] ^ block[j] for j in range(16)]
|
||||
y = gcm_gf_mult(y, vec_h)
|
||||
|
||||
return ''.join(chr(c) for c in y)
|
||||
|
||||
def inc32(block):
|
||||
counter, = unpack('>L', block[12:])
|
||||
counter += 1
|
||||
return block[:12] + pack('>L', counter)
|
||||
|
||||
def gctr(k, icb, plaintext):
|
||||
y = ''
|
||||
if len(plaintext) == 0:
|
||||
return y
|
||||
|
||||
aes = AES.new(k)
|
||||
cb = icb
|
||||
|
||||
for i in range(0, len(plaintext), aes.block_size):
|
||||
cb = inc32(cb)
|
||||
encrypted = aes.encrypt(cb)
|
||||
plaintext_block = plaintext[i:i+aes.block_size]
|
||||
y += strxor.strxor(plaintext_block, encrypted[:len(plaintext_block)])
|
||||
|
||||
return y
|
||||
|
||||
def gcm_decrypt(k, iv, encrypted, auth_data, tag):
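# GCM decryption: H = AES_k(0^128), derive the initial counter block from the IV,
# decrypt in CTR mode, then recompute the tag (E_k(Y0) xor GHASH) and compare.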
|
||||
aes = AES.new(k)
|
||||
h = aes.encrypt(chr(0) * aes.block_size)
|
||||
|
||||
if len(iv) == 12:
|
||||
y0 = iv + "\x00\x00\x00\x01"
|
||||
else:
|
||||
y0 = ghash(h, '', iv)
|
||||
|
||||
decrypted = gctr(k, y0, encrypted)
|
||||
s = ghash(h, auth_data, encrypted)
|
||||
|
||||
t = aes.encrypt(y0)
|
||||
T = strxor.strxor(s, t)
|
||||
if T != tag:
|
||||
raise ValueError('Decrypted data is invalid')
|
||||
else:
|
||||
return decrypted
|
||||
|
||||
def gcm_encrypt(k, iv, plaintext, auth_data):
|
||||
aes = AES.new(k)
|
||||
h = aes.encrypt(chr(0) * aes.block_size)
|
||||
|
||||
if len(iv) == 12:
|
||||
y0 = iv + "\x00\x00\x00\x01"
|
||||
else:
|
||||
y0 = ghash(h, '', iv)
|
||||
|
||||
encrypted = gctr(k, y0, plaintext)
|
||||
s = ghash(h, auth_data, encrypted)
|
||||
|
||||
t = aes.encrypt(y0)
|
||||
T = strxor.strxor(s, t)
|
||||
return (encrypted, T)
|
||||
|
||||
def main():
|
||||
#http://www.ieee802.org/1/files/public/docs2011/bn-randall-test-vectors-0511-v1.pdf
|
||||
k = 'AD7A2BD03EAC835A6F620FDCB506B345'.decode("hex")
|
||||
p = ''
|
||||
a = 'D609B1F056637A0D46DF998D88E5222AB2C2846512153524C0895E8108000F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F30313233340001'.decode("hex")
|
||||
iv = '12153524C0895E81B2C28465'.decode("hex")
|
||||
c, t = gcm_encrypt(k, iv, '', a)
|
||||
assert c == ""
|
||||
assert t == "f09478a9b09007d06f46e9b6a1da25dd".decode("hex")
|
||||
|
||||
k = 'AD7A2BD03EAC835A6F620FDCB506B345'.decode("hex")
|
||||
p = '08000F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A0002'.decode("hex")
|
||||
a = 'D609B1F056637A0D46DF998D88E52E00B2C2846512153524C0895E81'.decode("hex")
|
||||
iv = '12153524C0895E81B2C28465'.decode("hex")
|
||||
c, t = gcm_encrypt(k, iv, p, a)
|
||||
assert c == '701AFA1CC039C0D765128A665DAB69243899BF7318CCDC81C9931DA17FBE8EDD7D17CB8B4C26FC81E3284F2B7FBA713D'.decode("hex")
|
||||
assert t == '4F8D55E7D3F06FD5A13C0C29B9D5B880'.decode("hex")
|
||||
|
||||
key = "91bfb6cbcff07b93a4c68bbfe99ac63b713f0627025c0fb1ffc5b0812dc284f8".decode("hex")
|
||||
data = "020000000B00000028000000DE44D22E96B1966BAEF4CBEA8675871D40BA669401BD4EBB52AF9C025134187E70549012058456BF0EC0FA1F8FF9F822AC4312AB2141FA712E6D1482358EAC1421A1BFFA81EF38BD0BF2E52675D665EFE3C534E188F575774FAA92E74345575E370B9982661FAE8BD9243B7AD7D2105B275424C0CA1145B9D43AFF04F2747E40D62EC60563960D62A894BE66F267B14D75C0572BE60CC9B339D440FCB418D4F729BBF15C14E0D3A43E4A8B44523D8B3B0F3E7DF85AA67A707EE19CB893277D2392234D7DBC17DA4A0BD7F166189FC54C16C20D287E20FD2FB11BD2CE09ADBDABB95124CD4BFE219E34D3C80E69570A5A506555D7094916C5D75E0065F1796F556EDF0DAA1AA758E0C85AE3951BD363F26B1D43F6CBAEE12D97AD3B60CFA89C1C76BB29F2B54BE31B6CE166F4860C5E5DA92588EF53AA946DF159E60E6F05009D12FB1E37".decode("hex")
|
||||
ciphertext = data[12+40:-16]
|
||||
tag = data[-16:]
|
||||
print repr(gcm_decrypt(key, '', ciphertext, '', tag))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,156 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sys, os
|
||||
from PyQt4 import QtGui, QtCore
|
||||
from backups.backup4 import MBDB
|
||||
from keychain.keychain4 import Keychain4
|
||||
from util.bplist import BPlistReader
|
||||
from keystore.keybag import Keybag
|
||||
from util import readPlist
|
||||
|
||||
class KeychainTreeWidget(QtGui.QTreeWidget):
|
||||
def __init__(self, parent=None):
|
||||
QtGui.QTreeWidget.__init__(self, parent)
|
||||
|
||||
self.setGeometry(10, 10, 780, 380)
|
||||
self.header().hide()
|
||||
self.setColumnCount(2)
|
||||
|
||||
class KeychainTreeWidgetItem(QtGui.QTreeWidgetItem):
|
||||
def __init__(self, title):
|
||||
QtGui.QTreeWidgetItem.__init__(self, [title])
|
||||
|
||||
fnt = self.font(0)
|
||||
fnt.setBold(True)
|
||||
self.setFont(0, fnt)
|
||||
self.setColors()
|
||||
|
||||
def setText(self, column, title):
|
||||
QtGui.QTreeWidgetItem.setText(self, column, title)
|
||||
|
||||
def setColors(self):
|
||||
self.setForeground(0, QtGui.QBrush(QtGui.QColor(80, 80, 80)))
|
||||
self.setBackground(0, QtGui.QBrush(QtGui.QColor(230, 230, 230)))
|
||||
self.setBackground(1, QtGui.QBrush(QtGui.QColor(230, 230, 230)))
|
||||
|
||||
class LockedKeychainTreeWidgetItem(KeychainTreeWidgetItem):
|
||||
def setColors(self):
|
||||
self.setForeground(0, QtGui.QBrush(QtGui.QColor(255, 80, 80)))
|
||||
self.setBackground(0, QtGui.QBrush(QtGui.QColor(255, 230, 230)))
|
||||
self.setBackground(1, QtGui.QBrush(QtGui.QColor(255, 230, 230)))
|
||||
|
||||
class KeychainWindow(QtGui.QWidget):
|
||||
def __init__(self, parent=None):
|
||||
QtGui.QWidget.__init__(self, parent)
|
||||
|
||||
self.setGeometry(100, 100, 800, 400)
|
||||
self.setWindowTitle('Keychain Explorer')
|
||||
|
||||
self.passwordTree = KeychainTreeWidget(parent=self)
|
||||
|
||||
def setGenericPasswords(self, pwds):
|
||||
self.genericPasswords = pwds
|
||||
|
||||
self.passwordItems = KeychainTreeWidgetItem('Generic Passwords')
|
||||
|
||||
for pwd in self.genericPasswords:
|
||||
if not pwd.has_key('acct'):
|
||||
continue
|
||||
if len(pwd['acct']) > 0:
|
||||
item_title = '%s (%s)' % (pwd['svce'], pwd['acct'])
|
||||
else:
|
||||
item_title = pwd['svce']
|
||||
|
||||
if pwd['data'] is None:
|
||||
item = LockedKeychainTreeWidgetItem(item_title)
|
||||
else:
|
||||
item = KeychainTreeWidgetItem(item_title)
|
||||
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Service', pwd['svce']]))
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Account', pwd['acct']]))
|
||||
if pwd['data'] is not None:
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Data', pwd['data']]))
|
||||
else:
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Data', 'N/A']))
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Access Group', pwd['agrp']]))
|
||||
|
||||
self.passwordItems.addChild(item)
|
||||
|
||||
self.passwordTree.addTopLevelItem(self.passwordItems)
|
||||
|
||||
self.passwordTree.expandAll()
|
||||
self.passwordTree.resizeColumnToContents(0)
|
||||
|
||||
def setInternetPasswords(self, pwds):
|
||||
self.internetPasswords = pwds
|
||||
|
||||
self.internetPasswordItems = KeychainTreeWidgetItem('Internet Passwords')
|
||||
|
||||
for pwd in pwds:
|
||||
item_title = '%s (%s)' % (pwd['srvr'], pwd['acct'])
|
||||
|
||||
item = KeychainTreeWidgetItem(item_title)
|
||||
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Server', pwd['srvr']]))
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Account', pwd['acct']]))
|
||||
if pwd['data'] is not None:
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Data', pwd['data']]))
|
||||
else:
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Data', 'N/A']))
|
||||
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Port', str(pwd['port'])]))
|
||||
item.addChild(QtGui.QTreeWidgetItem(['Access Group', pwd['agrp']]))
|
||||
|
||||
self.internetPasswordItems.addChild(item)
|
||||
|
||||
self.passwordTree.addTopLevelItem(self.internetPasswordItems)
|
||||
|
||||
self.passwordTree.expandAll()
|
||||
self.passwordTree.resizeColumnToContents(0)
|
||||
|
||||
def warn(msg):
|
||||
print "WARNING: %s" % msg
|
||||
|
||||
def getBackupKeyBag(backupfolder, passphrase):
|
||||
manifest = readPlist(backupfolder + "/Manifest.plist")
|
||||
|
||||
kb = Keybag(manifest["BackupKeyBag"].data)
|
||||
|
||||
if kb.unlockBackupKeybagWithPasscode(passphrase):
|
||||
print "BackupKeyBag unlock OK"
|
||||
return kb
|
||||
else:
|
||||
return None
|
||||
|
||||
def main():
|
||||
app = QtGui.QApplication(sys.argv)
|
||||
init_path = "{0:s}/Apple Computer/MobileSync/Backup".format(os.getenv('APPDATA'))
|
||||
dirname = QtGui.QFileDialog.getExistingDirectory(None, "Select iTunes backup directory", init_path)
|
||||
kb = getBackupKeyBag(dirname, 'pouet') #XXX: hardcoded password for demo
|
||||
if not kb:
|
||||
warn("Backup keybag unlock fail : wrong passcode?")
|
||||
return
|
||||
db = MBDB(dirname)
|
||||
db.keybag = kb
|
||||
filename, record = db.get_file_by_name("keychain-backup.plist")
|
||||
keychain_data = db.read_file(filename, record)
|
||||
|
||||
f = file('keychain.tmp', 'wb')
|
||||
f.write(keychain_data)
|
||||
f.close()
|
||||
|
||||
kc = Keychain4('keychain.tmp', kb)
|
||||
|
||||
pwds = kc.get_passwords()
|
||||
inet_pwds = kc.get_inet_passwords()
|
||||
|
||||
qb = KeychainWindow()
|
||||
qb.setGenericPasswords(pwds)
|
||||
qb.setInternetPasswords(inet_pwds)
|
||||
qb.show()
|
||||
|
||||
sys.exit(app.exec_())
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -0,0 +1,88 @@
|
||||
import plistlib
|
||||
import os
|
||||
from keystore.keybag import Keybag
|
||||
from keychain.keychain4 import Keychain4
|
||||
from keychain.managedconfiguration import bruteforce_old_pass
|
||||
from util.ramdiskclient import RamdiskToolClient
|
||||
from util import write_file
|
||||
|
||||
def bf_system():
|
||||
curdir = os.path.dirname(os.path.abspath(__file__))
|
||||
client = RamdiskToolClient()
|
||||
di = client.getDeviceInfos()
|
||||
devicedir = di["udid"]
|
||||
if os.getcwd().find(devicedir) == -1:
|
||||
try:
|
||||
os.mkdir(devicedir)
|
||||
except:
|
||||
pass
|
||||
os.chdir(devicedir)
|
||||
key835 = di.get("key835").decode("hex")
|
||||
|
||||
systembag = client.getSystemKeyBag()
|
||||
kbkeys = systembag["KeyBagKeys"].data
|
||||
kb = Keybag.createWithDataSignBlob(kbkeys, key835)
|
||||
keybags = di.setdefault("keybags", {})
|
||||
kbuuid = kb.uuid.encode("hex")
|
||||
print "Keybag UUID :", kbuuid
|
||||
if keybags.has_key(kbuuid) and keybags[kbuuid].has_key("passcodeKey"):
|
||||
print "We've already seen this keybag"
|
||||
passcodeKey = keybags[kbuuid].get("passcodeKey").decode("hex")
|
||||
print kb.unlockWithPasscodeKey(passcodeKey)
|
||||
kb.printClassKeys()
|
||||
else:
|
||||
keybags[kbuuid] = {"KeyBagKeys": systembag["KeyBagKeys"]}
|
||||
di["KeyBagKeys"] = systembag["KeyBagKeys"]
|
||||
di.save()
|
||||
print "Enter passcode or leave blank for bruteforce:"
|
||||
z = raw_input()
|
||||
res = client.getPasscodeKey(systembag["KeyBagKeys"].data, z)
|
||||
if kb.unlockWithPasscodeKey(res.get("passcodeKey").decode("hex")):
|
||||
print "Passcode \"%s\" OK" % z
|
||||
di.update(res)
|
||||
keybags[kbuuid].update(res)
|
||||
di.save()
|
||||
keychain_blob = client.downloadFile("/mnt2/Keychains/keychain-2.db")
|
||||
write_file("keychain-2.db", keychain_blob)
|
||||
print "Downloaded keychain database, use keychain_tool.py to decrypt secrets"
|
||||
return
|
||||
if z != "":
|
||||
print "Wrong passcode, trying to bruteforce !"
|
||||
if kb.passcodeComplexity == 0:
|
||||
print "Trying all 4-digits passcodes..."
|
||||
bf = client.bruteforceKeyBag(systembag["KeyBagKeys"].data)
|
||||
if bf:
|
||||
di.update(bf)
|
||||
keybags[kbuuid].update(bf)
|
||||
print bf
|
||||
print kb.unlockWithPasscodeKey(bf.get("passcodeKey").decode("hex"))
|
||||
kb.printClassKeys()
|
||||
di["classKeys"] = kb.getClearClassKeysDict()
|
||||
di.save()
|
||||
else:
|
||||
print "Complex passcode used, trying dictionary attack ..."
|
||||
dictfile = os.path.join(curdir, 'wordlist.dict')
|
||||
try:
|
||||
wordlist = open(dictfile, 'r').readlines()
|
||||
except (OSError, IOError), e:
|
||||
exit(e)
|
||||
for line in wordlist:
|
||||
res = client.getPasscodeKey(systembag["KeyBagKeys"].data, line.rstrip('\n'))
|
||||
if kb.unlockWithPasscodeKey(res.get("passcodeKey").decode("hex")):
|
||||
print "Passcode \"%s\" OK" % line.rstrip('\n')
|
||||
di.update(res)
|
||||
keybags[kbuuid].update(res)
|
||||
di.save()
|
||||
keychain_blob = client.downloadFile("/mnt2/Keychains/keychain-2.db")
|
||||
write_file("keychain-2.db", keychain_blob)
|
||||
print "Downloaded keychain database, use keychain_tool.py to decrypt secrets"
|
||||
return
|
||||
print "Passcode not found!"
|
||||
return
|
||||
|
||||
#keychain_blob = client.downloadFile("/private/var/Keychains/keychain-2.db")
|
||||
keychain_blob = client.downloadFile("/mnt2/Keychains/keychain-2.db")
|
||||
write_file("keychain-2.db", keychain_blob)
|
||||
print "Downloaded keychain database, use keychain_tool.py to decrypt secrets"
|
||||
|
||||
bf_system()
|
@ -0,0 +1,38 @@
|
||||
import os
|
||||
import plistlib
|
||||
from keystore.keybag import Keybag
|
||||
from util.ramdiskclient import RamdiskToolClient
|
||||
|
||||
"""
|
||||
this won't work on iOS 5 unless the passcode was already bruteforced
|
||||
"""
|
||||
def escrow():
|
||||
client = RamdiskToolClient()
|
||||
di = client.getDeviceInfos()
|
||||
key835 = di.get("key835").decode("hex")
|
||||
|
||||
plist = os.environ["ALLUSERSPROFILE"] + "/Apple/Lockdown/%s.plist" % di["udid"]
|
||||
lockdown = plistlib.readPlist(plist)
|
||||
kb = Keybag.createWithDataSignBlob(lockdown["EscrowBag"].data, key835)
|
||||
|
||||
keybags = di.setdefault("keybags", {})
|
||||
kbuuid = kb.uuid.encode("hex")
|
||||
if not keybags.has_key(kbuuid):
|
||||
print lockdown["HostID"]
|
||||
res = client.getEscrowRecord(lockdown["HostID"])
|
||||
bagkey = res.get("BagKey")
|
||||
print "Bag key" + bagkey.data.encode("hex")
|
||||
res = client.getPasscodeKey(lockdown["EscrowBag"].data, bagkey)
|
||||
print res
|
||||
passcodeKey = res["passcodeKey"].decode("hex")
|
||||
keybags[kbuuid] = {"KeyBagKeys": lockdown["EscrowBag"],
|
||||
"passcode": bagkey,
|
||||
"passcodeKey": passcodeKey.encode("hex")}
|
||||
di.save()
|
||||
else:
|
||||
passcodeKey = keybags[kbuuid].get("passcodeKey").decode("hex")
|
||||
|
||||
print kb.unlockWithPasscodeKey(passcodeKey)
|
||||
kb.printClassKeys()
|
||||
|
||||
escrow()
|
@ -0,0 +1,34 @@
|
||||
from optparse import OptionParser
|
||||
from hfs.emf import EMFVolume
|
||||
from util.bdev import FileBlockDevice
|
||||
import plistlib
|
||||
|
||||
def main():
|
||||
parser = OptionParser(usage="emf_decrypter.py disk_image.bin")
|
||||
parser.add_option("-w", "--nowrite", dest="write", action="store_false", default=True,
|
||||
help="disable modifications of input file, for testing")
|
||||
(options, args) = parser.parse_args()
|
||||
if len(args) < 1:
|
||||
parser.print_help()
|
||||
return
|
||||
device_infos = None
|
||||
if len(args) >= 2: device_infos = plistlib.readPlist(args[1])
|
||||
|
||||
p = FileBlockDevice(args[0], 0, options.write)
|
||||
v = EMFVolume(p, device_infos)
|
||||
if not v.keybag.unlocked:
|
||||
print "Keybag locked, protected files won't be decrypted, continue anyway ?"
|
||||
if raw_input() == "n":
|
||||
return
|
||||
if options.write:
|
||||
print "WARNING ! This tool will modify the hfs image and possibly wreck it if something goes wrong !"
|
||||
print "Make sure to backup the image before proceeding"
|
||||
print "You can use the --nowrite option to do a dry run instead"
|
||||
else:
|
||||
print "Test mode : the input file will not be modified"
|
||||
print "Press a key to continue or CTRL-C to abort"
|
||||
raw_input()
|
||||
v.decryptAllFiles()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,25 @@
|
||||
import os
|
||||
import sys
|
||||
from hfs.emf import EMFVolume
|
||||
from hfs.journal import do_emf_carving
|
||||
from util.bdev import FileBlockDevice
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print "Usage: emf_undelete.py disk_image.bin"
|
||||
sys.exit(0)
|
||||
filename = sys.argv[1]
|
||||
volume = EMFVolume(FileBlockDevice(filename), None)
|
||||
dirname = os.path.dirname(filename)
|
||||
if dirname == "":
|
||||
dirname = "."
|
||||
outdir = dirname + "/" + volume.volumeID().encode("hex") + "_" + os.path.basename(filename)
|
||||
carveokdir = outdir + "/undelete/"
|
||||
carvenokdir = outdir + "/junk/"
|
||||
try:
|
||||
os.makedirs(carveokdir)
|
||||
os.makedirs(carvenokdir)
|
||||
except:
|
||||
pass
|
||||
|
||||
do_emf_carving(volume, carveokdir, carvenokdir)
|
@ -0,0 +1,12 @@
|
||||
from construct.core import Struct
|
||||
from construct.macros import *
|
||||
|
||||
IMG2 = Struct("IMG2",
|
||||
String("magic",4),
|
||||
ULInt32("block_size"),
|
||||
ULInt32("images_offset"),
|
||||
ULInt32("images_block"),
|
||||
ULInt32("images_length"),
|
||||
Padding(0x1C),
|
||||
ULInt32("crc32"),
|
||||
)
|
@ -0,0 +1,292 @@
|
||||
from crypto.aes import AESdecryptCBC
|
||||
from util import read_file, write_file
|
||||
from util.ramdiskclient import RamdiskToolClient
|
||||
import M2Crypto
|
||||
import struct
|
||||
import hashlib
|
||||
import os
|
||||
import sys
|
||||
|
||||
def decryptGID(data):
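# Ask the ramdisk tool running on the device to perform the AES operation with the
# hardware GID key; the key itself never leaves the device, so this requires a
# device booted with the custom ramdisk.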
|
||||
try:
|
||||
client = RamdiskToolClient.get()
|
||||
except:
|
||||
return None
|
||||
r = client.aesGID(data)
|
||||
if r and r.has_key("data"):
|
||||
return r.data.data
|
||||
return None
|
||||
|
||||
def decryptPseudoGID(data):
|
||||
pseudogid = "5F650295E1FFFC97CE77ABD49DD955B3".decode("hex")
|
||||
return AESdecryptCBC(data, pseudogid, padding=False)
|
||||
|
||||
def dword(s,i):
|
||||
return struct.unpack("<L", s[i:i+4])[0]
|
||||
|
||||
def extract_img3s(blob):
|
||||
i = 0
|
||||
res = []
|
||||
while i < len(blob):
|
||||
if blob[i:i+4] != "3gmI":
|
||||
break
|
||||
TYPE = blob[i+16:i+20][::-1]
|
||||
l = struct.unpack("<L", blob[i+4:i+8])[0]
|
||||
data = blob[i:i+l]
|
||||
img3 = Img3(TYPE, data)
|
||||
res.append(img3)
|
||||
i += l
|
||||
return res
|
||||
|
||||
class Img3:
|
||||
INT_FIELDS = ["SEPO", "SDOM", "BORD", "CHIP", "PROD"]
|
||||
|
||||
rootCert = None
|
||||
def __init__(self, filename, data=None):
|
||||
self.filename = filename
|
||||
self.shortname = os.path.basename(filename)
|
||||
self.certs = None
|
||||
if not data:
|
||||
img3 = read_file(filename)
|
||||
else:
|
||||
img3 = data
|
||||
self.img3 = img3
|
||||
self.ecidoffset = 0
|
||||
|
||||
if img3[0:4] != '3gmI':
|
||||
print "Magic 3gmI not found in " + filename
|
||||
return
|
||||
|
||||
fullSize = dword(img3, 4)
|
||||
sizeNoPack = dword(img3, 8)
|
||||
sigCheckArea = dword(img3, 12)
|
||||
|
||||
self.sha1 = hashlib.sha1(img3)
|
||||
self.fileHash = hashlib.sha1(img3[12:20+sigCheckArea])
|
||||
|
||||
i = 20
|
||||
|
||||
sections = {}
|
||||
|
||||
while i < fullSize:
|
||||
tag = img3[i:i+4][::-1] #reverse fourcc tag
|
||||
total_length = dword(img3, i+4)
|
||||
data_length = dword(img3, i+8)
|
||||
|
||||
if tag == "DATA":
|
||||
self.datalen = data_length
|
||||
data = img3[i+12:i+total_length]
|
||||
else:
|
||||
data = img3[i+12:i+12+data_length]
|
||||
|
||||
if tag in Img3.INT_FIELDS:
|
||||
data = struct.unpack("<L", data)[0]
|
||||
elif tag == "VERS":
|
||||
data = data[4:]
|
||||
elif tag == "TYPE":
|
||||
data = data[::-1]
|
||||
elif tag == "ECID":
|
||||
self.ecidoffset = i
|
||||
#print "%s offset=%x len=%x" % (tag,i, data_length)
|
||||
if tag != "KBAG" or dword(data,0) == 1:
|
||||
sections[tag] = data
|
||||
|
||||
i += total_length
|
||||
|
||||
self.sections = sections
|
||||
self.leaf_cert = None
|
||||
self.sig = None
|
||||
self.key = ""
|
||||
self.iv = ""
|
||||
self.extractCertificates()
|
||||
#self.sigcheck()
|
||||
|
||||
def isEncrypted(self):
|
||||
return self.sections.has_key("KBAG")
|
||||
|
||||
@staticmethod
|
||||
def setRootCert(filename):
|
||||
try:
|
||||
Img3.rootCert = M2Crypto.X509.load_cert_der_string(open(filename,"rb").read())
|
||||
except:
|
||||
print "IMG3.setRootCert failed loading %s" % filename
|
||||
|
||||
def extractCertificates(self):
|
||||
if not self.sections.has_key("CERT"):
|
||||
return
|
||||
|
||||
certs = {}
|
||||
i = 0
|
||||
|
||||
while i < len(self.sections["CERT"]):
|
||||
data = self.sections["CERT"][i:]
|
||||
cert = M2Crypto.X509.load_cert_der_string(data)
|
||||
name = cert.get_subject().as_text()
|
||||
#name = name[name.find("CN=")+3:]
|
||||
#print name
|
||||
certs[name] = cert
|
||||
i += len(cert.as_der())
|
||||
|
||||
#XXX nested Img3 in leaf cert 1.2.840.113635.100.6.1.1
|
||||
#CFTypeRef kSecOIDAPPLE_EXTENSION_APPLE_SIGNING = CFSTR("1.2.840.113635.100.6.1.1");
|
||||
z = data.find("3gmI")
|
||||
if z != -1:
|
||||
zz = Img3("cert", data[z:])
|
||||
self.sections.update(zz.sections)
|
||||
|
||||
#assume leaf cert is last
|
||||
self.certs = certs
|
||||
self.leaf_cert = cert
|
||||
self.leaf_name = name
|
||||
|
||||
def writeCerts(self):
|
||||
if not self.certs:
|
||||
self.extractCertificates()
|
||||
|
||||
for key, cert in self.certs.items():
|
||||
cert_data = cert.as_der()
|
||||
cert_sha1 = hashlib.sha1(cert_data).hexdigest()
|
||||
write_file("%s_%s.crt" % (key, cert_sha1), cert_data)
|
||||
|
||||
"""
|
||||
Decrypt SHSH section with leaf certificate public key
|
||||
output should be the SHA1 of img3[12:20+sigCheckArea]
|
||||
"""
|
||||
def sigcheck(self, k89A=None):
|
||||
if not self.sections.has_key("SHSH"):
|
||||
print "[x] FAIL sigcheck %s : no SHSH section" % self.shortname
|
||||
return False
|
||||
|
||||
if not self.leaf_cert:
|
||||
#print "Extracting certificates"
|
||||
self.extractCertificates()
|
||||
cert = self.leaf_cert
|
||||
#print "Leaf cert subject: %s" % cert.get_subject()
|
||||
certChainOk = False
|
||||
while True:
|
||||
issuer = cert.get_issuer().as_text()
|
||||
#print "issuer: %s" % issuer
|
||||
if not self.certs.has_key(issuer):
|
||||
if not Img3.rootCert:
|
||||
print "Cert chain stops at %s" % issuer
|
||||
certChainOk = False
|
||||
break
|
||||
#print "Verifying cert.",
|
||||
certChainOk = cert.verify(Img3.rootCert.get_pubkey())
|
||||
break
|
||||
issuer = self.certs[issuer]
|
||||
if not cert.verify(issuer.get_pubkey()):
|
||||
print "%s is not signed by %s (verify fail)" % (cert.get_subject().as_text(), issuer.get_subject().as_text())
|
||||
return False
|
||||
cert = issuer
|
||||
shsh = self.sections["SHSH"]
|
||||
print "Got SHSH"
|
||||
|
||||
try:
|
||||
sig = self.leaf_cert.get_pubkey().get_rsa().public_decrypt(shsh, M2Crypto.RSA.pkcs1_padding)
|
||||
except:
|
||||
if k89A == None:
|
||||
print "SHSH RSA decrypt FAIL, IMG3 must be personalized (SHSH encrypted with k89A)"
|
||||
return False
|
||||
try:
|
||||
shsh = AESdecryptCBC(shsh, k89A)
|
||||
sig = self.leaf_cert.get_pubkey().get_rsa().public_decrypt(shsh, M2Crypto.RSA.pkcs1_padding)
|
||||
except:
|
||||
raise
|
||||
return False
|
||||
|
||||
#DigestInfo SHA1 http://www.ietf.org/rfc/rfc3447.txt
|
||||
sha1_digestInfo = "3021300906052b0e03021a05000414".decode("hex")
|
||||
if sig[:len(sha1_digestInfo)] == sha1_digestInfo:
|
||||
pass#print "DigestInfo SHA1 OK"
|
||||
|
||||
self.sig = sig = sig[len(sha1_digestInfo):]
|
||||
|
||||
ok = sig == self.fileHash.digest()
|
||||
|
||||
if ok:
|
||||
print "%s : signature check OK (%s)" % (self.shortname, self.leaf_name)
|
||||
else:
|
||||
print "Signature check for %s failed" % self.shortname
|
||||
print "Decrypted SHA1 " + sig.encode("hex")
|
||||
print "Sigcheck area SHA1 " + self.fileHash.hexdigest()
|
||||
return ok
|
||||
|
||||
def ticketHash(self):
|
||||
#sigchecklen = struct.unpack("<L", self.img3[12:16])[0]
|
||||
tohash = struct.pack("<L", self.ecidoffset - 20) + self.img3[16:12 + self.ecidoffset - 20+8]
|
||||
return hashlib.sha1(tohash).digest()
|
||||
|
||||
def setIvAndKey(self, iv, key):
|
||||
self.iv = iv
|
||||
self.key = key
|
||||
|
||||
def decryptKBAG(self):
|
||||
if self.iv and self.key:
|
||||
print "No need to decrypt KBAG"
|
||||
return
|
||||
if not self.sections.has_key("KBAG"):
|
||||
print "FAIL: decrypt_kbag no KBAG section for %s" % self.filename
|
||||
return
|
||||
|
||||
kbag = self.sections["KBAG"]
|
||||
|
||||
cryptState = dword(kbag,0)
|
||||
|
||||
if cryptState != 1:
|
||||
print "FAIL: cryptState = %d" % cryptState
|
||||
|
||||
aesType = dword(kbag,4)
|
||||
|
||||
if aesType != 128 and aesType != 192 and aesType != 256:
|
||||
print "FAIL: aesType = %d" % aesType
|
||||
|
||||
keySize = aesType / 8
|
||||
#print "KBAG keySize = " + str(keySize)
|
||||
#print "KBAG = %s" % kbag.encode("hex")
|
||||
#kbag_dec = decryptPseudoGID(kbag[8:8+16+keySize])
|
||||
kbag_dec = decryptGID(kbag[8:8+16+keySize])
|
||||
if not kbag_dec:
|
||||
return False
|
||||
|
||||
self.iv = kbag_dec[:16]
|
||||
self.key = kbag_dec[16:]
|
||||
return True
|
||||
|
||||
def isValidDecryptedData(self, data):
|
||||
if len(data) > 16 and data.startswith("complzss"):
|
||||
return "kernel"
|
||||
if len(data) > 0x800 and data[0x400:0x400+2] == "H+":
|
||||
return "ramdisk"
|
||||
if len(data) > 0x300 and data[0x280:0x285] == "iBoot":
|
||||
return "bootloader";
|
||||
if data.find("serial-number") != -1:
|
||||
return "devicetree"
|
||||
if data.startswith("iBootIm"):
|
||||
return "bootlogo"
|
||||
|
||||
def getRawData(self):
|
||||
return self.sections["DATA"][:self.datalen]
|
||||
|
||||
def decryptData(self, key=None, iv=None):
|
||||
if not self.sections.has_key("KBAG"):
|
||||
return self.getRawData()
|
||||
|
||||
if not key or not iv:
|
||||
if not self.decryptKBAG():
|
||||
return
|
||||
key = self.key
|
||||
iv = self.iv
|
||||
|
||||
data = AESdecryptCBC(self.sections["DATA"], key, iv)
|
||||
x = self.isValidDecryptedData(data)
|
||||
if not x:
|
||||
print >> sys.stderr, "%s : Decrypted data seems invalid" % self.shortname
|
||||
print >> sys.stderr, data[:50].encode("hex")
|
||||
return False
|
||||
print "%s : decrypted OK (%s)" % (self.shortname, x)
|
||||
return data[:self.datalen]
|
||||
|
||||
if __name__ == "__main__":
|
||||
img3 = Img3(sys.argv[1])
|
||||
img3.sigcheck()
|
@ -0,0 +1,27 @@
|
||||
from construct.core import Struct
|
||||
from construct.macros import *
|
||||
from construct import RepeatUntil, OneOf
|
||||
from util import hexdump
|
||||
|
||||
SCFGItem = Struct("SCFGItem",
|
||||
String("tag", 4),
|
||||
String("data", 16, padchar="\x00")
|
||||
)
|
||||
|
||||
SCFG = Struct("SCFG",
|
||||
OneOf(String("magic", 4), ["gfCS"]),
|
||||
ULInt32("length"),
|
||||
ULInt32("unk1"),
|
||||
ULInt32("unk2"),
|
||||
ULInt32("unk3"),
|
||||
ULInt32("unk4")
|
||||
)
|
||||
|
||||
def parse_SCFG(data):
|
||||
res = {}
|
||||
scfg = SCFG.parse(data)
|
||||
assert scfg.length > 0x18
|
||||
for i in Array((scfg.length - 0x18) / 20, SCFGItem).parse(data[0x18:scfg.length]):
|
||||
if i.tag != "\xFF\xFF\xFF\xFF":
|
||||
res[str(i.tag)[::-1]] = str(i.data)
|
||||
return res
|
268
dump-imessages/iphone-dataprotection/python_scripts/hfs/btree.py
Normal file
@ -0,0 +1,268 @@
|
||||
from structs import *
|
||||
|
||||
"""
|
||||
Probably buggy
|
||||
HAX, only works on case SENSITIVE
|
||||
"""
|
||||
|
||||
class BTree(object):
|
||||
def __init__(self, file, keyStruct, dataStruct):
|
||||
self.file = file
|
||||
self.keyStruct = keyStruct
|
||||
self.dataStruct = dataStruct
|
||||
block0 = self.file.readBlock(0)
|
||||
btnode = BTNodeDescriptor.parse(block0)
|
||||
assert btnode.kind == kBTHeaderNode
|
||||
self.header = BTHeaderRec.parse(block0[BTNodeDescriptor.sizeof():])
|
||||
assert self.header.keyCompareType == 0 or self.header.keyCompareType == kHFSBinaryCompare
|
||||
#TODO: do more testing when nodeSize != blockSize
|
||||
self.nodeSize = self.header.nodeSize
|
||||
self.nodesInBlock = file.blockSize / self.header.nodeSize
|
||||
self.blocksForNode = self.header.nodeSize / file.blockSize
|
||||
#print file.blockSize , self.header.nodeSize
|
||||
self.lastRecordNumber = 0
|
||||
type, (hdr, maprec) = self.readBtreeNode(0)
|
||||
assert len(maprec) == self.nodeSize - 256
|
||||
if self.header.totalNodes / 8 > len(maprec):
|
||||
pass #TODO: handle map records
|
||||
self.maprec = maprec
|
||||
|
||||
def isNodeInUse(self, nodeNumber):
|
||||
thisByte = ord(self.maprec[nodeNumber / 8])
|
||||
return (thisByte & (1 << (7 - (nodeNumber % 8)))) != 0
|
||||
|
||||
def readEmptySpace(self):
|
||||
res = ""
|
||||
z = 0
|
||||
for i in xrange(self.header.totalNodes):
|
||||
if not self.isNodeInUse(i):
|
||||
z += 1
|
||||
res += self.readNode(i)
|
||||
assert z == self.header.freeNodes
|
||||
return res
|
||||
|
||||
#convert construct structure to tuple
|
||||
def getComparableKey(self, k):
|
||||
raise Exception("implement in subclass")
|
||||
|
||||
def compareKeys(self, k1, k2):
|
||||
k2 = self.getComparableKey(k2)
|
||||
if k1 == k2:
|
||||
return 0
|
||||
return -1 if k1 < k2 else 1
|
||||
|
||||
def printLeaf(self, key, data):
|
||||
print key, data
|
||||
|
||||
def readNode(self, nodeNumber):
|
||||
node = ""
|
||||
for i in xrange(self.blocksForNode):
|
||||
node += self.file.readBlock(nodeNumber * self.blocksForNode + i)
|
||||
return node
|
||||
|
||||
def readBtreeNode(self, nodeNumber):
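# Parse one B-tree node: the record offset table sits at the end of the node;
# header nodes return (BTHeaderRec, map record), index nodes return keys carrying
# child node pointers, and leaf nodes return (key, data) record pairs.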
|
||||
self.lastnodeNumber = nodeNumber
|
||||
node = self.readNode(nodeNumber)
|
||||
self.lastbtnode = btnode = BTNodeDescriptor.parse(node)
|
||||
|
||||
if btnode.kind == kBTHeaderNode:
|
||||
assert btnode.numRecords == 3
|
||||
end = self.nodeSize - 8 #2*4
|
||||
offsets = Array(btnode.numRecords+1, UBInt16("off")).parse(node[end:])
|
||||
assert offsets[-4] == end
|
||||
hdr = BTHeaderRec.parse(node[BTNodeDescriptor.sizeof():])
|
||||
maprec = node[offsets[-3]:end]
|
||||
return kBTHeaderNode, [hdr, maprec]
|
||||
elif btnode.kind == kBTIndexNode:
|
||||
recs = []
|
||||
offsets = Array(btnode.numRecords, UBInt16("off")).parse(node[-2*btnode.numRecords:])
|
||||
for i in xrange(btnode.numRecords):
|
||||
off = offsets[btnode.numRecords-i-1]
|
||||
k = self.keyStruct.parse(node[off:])
|
||||
off += 2 + k.keyLength
|
||||
k.childNode = UBInt32("nodeNumber").parse(node[off:off+4])
|
||||
recs.append(k)
|
||||
return kBTIndexNode, recs
|
||||
elif btnode.kind == kBTLeafNode:
|
||||
recs = []
|
||||
offsets = Array(btnode.numRecords, UBInt16("off")).parse(node[-2*btnode.numRecords:])
|
||||
for i in xrange(btnode.numRecords):
|
||||
off = offsets[btnode.numRecords-i-1]
|
||||
k = self.keyStruct.parse(node[off:])
|
||||
off += 2 + k.keyLength
|
||||
d = self.dataStruct.parse(node[off:])
|
||||
recs.append((k,d))
|
||||
return kBTLeafNode, recs
|
||||
else:
|
||||
raise Exception("Invalid node type " + str(btnode))
|
||||
|
||||
def search(self, searchKey, node=None):
|
||||
if node == None:
|
||||
node = self.header.rootNode
|
||||
|
||||
type, stuff = self.readBtreeNode(node)
|
||||
if len(stuff) == 0:
|
||||
return None, None
|
||||
|
||||
if type == kBTIndexNode:
|
||||
for i in xrange(len(stuff)):
|
||||
if self.compareKeys(searchKey, stuff[i]) < 0:
|
||||
if i > 0:
|
||||
i = i - 1
|
||||
return self.search(searchKey, stuff[i].childNode)
|
||||
return self.search(searchKey, stuff[len(stuff)-1].childNode)
|
||||
elif type == kBTLeafNode:
|
||||
self.lastRecordNumber = 0
|
||||
for k,v in stuff:
|
||||
res = self.compareKeys(searchKey, k)
|
||||
if res == 0:
|
||||
return k, v
|
||||
if res < 0:
|
||||
return None, None
|
||||
self.lastRecordNumber += 1
|
||||
return None, None
|
||||
|
||||
def traverse(self, node=None, count=0, callback=None):
|
||||
if node == None:
|
||||
node = self.header.rootNode
|
||||
|
||||
type, stuff = self.readBtreeNode(node)
|
||||
|
||||
if type == kBTIndexNode:
|
||||
for i in xrange(len(stuff)):
|
||||
count += self.traverse(stuff[i].childNode, callback=callback)
|
||||
elif type == kBTLeafNode:
|
||||
for k,v in stuff:
|
||||
if callback:
|
||||
callback(k,v)
|
||||
else:
|
||||
self.printLeaf(k, v)
|
||||
count += 1
|
||||
return count
|
||||
|
||||
def traverseLeafNodes(self, callback=None):
|
||||
nodeNumber = self.header.firstLeafNode
|
||||
count = 0
|
||||
while nodeNumber != 0:
|
||||
_, stuff = self.readBtreeNode(nodeNumber)
|
||||
count += len(stuff)
|
||||
for k,v in stuff:
|
||||
if callback:
|
||||
callback(k,v)
|
||||
else:
|
||||
self.printLeaf(k, v)
|
||||
nodeNumber = self.lastbtnode.fLink
|
||||
return count
|
||||
|
||||
#XXX
|
||||
def searchMultiple(self, searchKey, filterKeyFunction=lambda x:False):
|
||||
self.search(searchKey)
|
||||
nodeNumber = self.lastnodeNumber
|
||||
recordNumber = self.lastRecordNumber
|
||||
kv = []
|
||||
while nodeNumber != 0:
|
||||
_, stuff = self.readBtreeNode(nodeNumber)
|
||||
for k,v in stuff[recordNumber:]:
|
||||
if filterKeyFunction(k):
|
||||
kv.append((k,v))
|
||||
else:
|
||||
return kv
|
||||
nodeNumber = self.lastbtnode.fLink
|
||||
recordNumber = 0
|
||||
return kv
|
||||
|
||||
def getLBAsHax(self):
|
||||
nodes = [self.lastnodeNumber]
|
||||
n = self.lastbtnode
|
||||
for i in xrange(2):
|
||||
nodes.append(self.lastbtnode.bLink)
|
||||
self.readBtreeNode(self.lastbtnode.bLink)
|
||||
self.lastbtnode = n
|
||||
for i in xrange(2):
|
||||
nodes.append(self.lastbtnode.fLink)
|
||||
self.readBtreeNode(self.lastbtnode.fLink)
|
||||
res = []
|
||||
for n in nodes:
|
||||
res.append(self.file.getLBAforBlock(n * self.blocksForNode))
|
||||
return res
|
||||
|
||||
class CatalogTree(BTree):
|
||||
def __init__(self, file, volume):
|
||||
super(CatalogTree,self).__init__(file, HFSPlusCatalogKey, HFSPlusCatalogData)
|
||||
self.volume = volume
|
||||
|
||||
def printLeaf(self, k, d):
|
||||
if d.recordType == kHFSPlusFolderRecord or d.recordType == kHFSPlusFileRecord:
|
||||
print getString(k)
|
||||
|
||||
def getComparableKey(self, k2):
|
||||
#XXX http://dubeiko.com/development/FileSystems/HFSPLUS/tn1150.html#StringComparisonAlgorithm
|
||||
return (k2.parentID, getString(k2))
|
||||
|
||||
def searchByCNID(self, cnid):
|
||||
threadk, threadd = self.search((cnid, ""))
|
||||
return self.search((threadd.data.parentID, getString(threadd.data))) if threadd else (None, None)
|
||||
|
||||
def getFolderContents(self, cnid):
|
||||
return self.searchMultiple((cnid, ""), lambda k:k.parentID == cnid)
|
||||
|
||||
def getRecordFromPath(self, path):
|
||||
if not path.startswith("/"):
|
||||
return None, None
|
||||
if path == "/":
|
||||
return self.searchByCNID(kHFSRootFolderID)
|
||||
parentId=kHFSRootFolderID
|
||||
i = 1
|
||||
k, v = None, None
|
||||
for p in path.split("/")[1:]:
|
||||
if p == "":
|
||||
break
|
||||
k,v = self.search((parentId, p))
|
||||
if (k,v) == (None, None):
|
||||
return None, None
|
||||
|
||||
if v.recordType == kHFSPlusFolderRecord:
|
||||
parentId = v.data.folderID
|
||||
elif v.recordType == kHFSPlusFileRecord and is_symlink(v.data):
|
||||
linkdata = self.volume.readFileByRecord(v)
|
||||
print "symlink %s => %s" % (p, linkdata)
|
||||
if not linkdata:
|
||||
return None, None
|
||||
t = path.split("/")
|
||||
t[i] = linkdata
|
||||
newpath = "/".join(t)
|
||||
return self.getRecordFromPath(newpath)
|
||||
else:
|
||||
break
|
||||
i += 1
|
||||
return k,v
|
||||
|
||||
class ExtentsOverflowTree(BTree):
|
||||
def __init__(self, file):
|
||||
super(ExtentsOverflowTree,self).__init__(file, HFSPlusExtentKey, HFSPlusExtentRecord)
|
||||
|
||||
def getComparableKey(self, k2):
|
||||
return (k2.fileID, k2.forkType, k2.startBlock)
|
||||
|
||||
def searchExtents(self, fileID, forkType, startBlock):
|
||||
return self.search((fileID, forkType, startBlock))
|
||||
|
||||
class AttributesTree(BTree):
|
||||
def __init__(self, file):
|
||||
super(AttributesTree,self).__init__(file, HFSPlusAttrKey, HFSPlusAttrData)
|
||||
|
||||
def printLeaf(self, k, d):
|
||||
print k.fileID, getString(k), d.data.encode("hex")
|
||||
|
||||
def getComparableKey(self, k2):
|
||||
return (k2.fileID, getString(k2))
|
||||
|
||||
def searchXattr(self, fileID, name):
|
||||
k,v = self.search((fileID, name))
|
||||
return v.data if v else None
|
||||
|
||||
def getAllXattrs(self, fileID):
|
||||
res = {}
|
||||
for k,v in self.searchMultiple((fileID, ""), lambda k:k.fileID == fileID):
|
||||
res[getString(k)] = v.data
|
||||
return res
|
220
dump-imessages/iphone-dataprotection/python_scripts/hfs/emf.py
Normal file
@ -0,0 +1,220 @@
|
||||
from construct import Struct, ULInt16, ULInt32, String
|
||||
from construct.macros import ULInt64, Padding, If
|
||||
from crypto.aes import AESencryptCBC, AESdecryptCBC
|
||||
from hfs import HFSVolume, HFSFile
|
||||
from keystore.keybag import Keybag
|
||||
from structs import HFSPlusVolumeHeader, kHFSPlusFileRecord, getString, \
|
||||
kHFSRootParentID
|
||||
from util import search_plist
|
||||
from util.bruteforce import loadKeybagFromVolume
|
||||
import hashlib
|
||||
import os
|
||||
import plistlib
|
||||
import struct
|
||||
|
||||
"""
|
||||
iOS >= 4 raw images
|
||||
http://opensource.apple.com/source/xnu/xnu-1699.22.73/bsd/hfs/hfs_cprotect.c
|
||||
http://opensource.apple.com/source/xnu/xnu-1699.22.73/bsd/sys/cprotect.h
|
||||
"""
|
||||
|
||||
cp_root_xattr = Struct("cp_root_xattr",
|
||||
ULInt16("major_version"),
|
||||
ULInt16("minor_version"),
|
||||
ULInt64("flags"),
|
||||
ULInt32("reserved1"),
|
||||
ULInt32("reserved2"),
|
||||
ULInt32("reserved3"),
|
||||
ULInt32("reserved4")
|
||||
)
|
||||
|
||||
cprotect_xattr = Struct("cprotect_xattr",
|
||||
ULInt16("xattr_major_version"),
|
||||
ULInt16("xattr_minor_version"),
|
||||
ULInt32("flags"),
|
||||
ULInt32("persistent_class"),
|
||||
ULInt32("key_size"),
|
||||
If(lambda ctx: ctx["xattr_major_version"] >= 4, Padding(20)),
|
||||
String("persistent_key", length=lambda ctx: ctx["key_size"])
|
||||
)
|
||||
NSProtectionNone = 4
|
||||
|
||||
PROTECTION_CLASSES={
|
||||
1:"NSFileProtectionComplete",
|
||||
2:"NSFileProtectionCompleteUnlessOpen",
|
||||
3:"NSFileProtectionCompleteUntilFirstUserAuthentication",
|
||||
4:"NSFileProtectionNone",
|
||||
5:"NSFileProtectionRecovery?"
|
||||
}
|
||||
|
||||
#HAX: flags set in finderInfo[3] to tell if the image was already decrypted
|
||||
FLAG_DECRYPTING = 0x454d4664 #EMFd big endian
|
||||
FLAG_DECRYPTED = 0x454d4644 #EMFD big endian
|
||||
|
||||
class EMFFile(HFSFile):
|
||||
def __init__(self, volume, hfsplusfork, fileID, filekey, deleted=False):
|
||||
super(EMFFile,self).__init__(volume, hfsplusfork, fileID, deleted)
|
||||
self.filekey = filekey
|
||||
self.ivkey = None
|
||||
self.decrypt_offset = 0
|
||||
if volume.cp_major_version == 4:
|
||||
self.ivkey = hashlib.sha1(filekey).digest()[:16]
|
||||
|
||||
def processBlock(self, block, lba):
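# Re-encrypt the block with the volume EMF key (the dumped image already has the
# EMF layer removed), then decrypt with the per-file key; cprotect v4 files derive
# a fresh IV per 0x1000-byte chunk from the file offset and the SHA1-based ivkey.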
|
||||
iv = self.volume.ivForLBA(lba)
|
||||
ciphertext = AESencryptCBC(block, self.volume.emfkey, iv)
|
||||
if not self.ivkey:
|
||||
clear = AESdecryptCBC(ciphertext, self.filekey, iv)
|
||||
else:
|
||||
clear = ""
|
||||
for i in xrange(len(block)/0x1000):
|
||||
iv = self.volume.ivForLBA(self.decrypt_offset, False)
|
||||
iv = AESencryptCBC(iv, self.ivkey)
|
||||
clear += AESdecryptCBC(ciphertext[i*0x1000:(i+1)*0x1000], self.filekey,iv)
|
||||
self.decrypt_offset += 0x1000
|
||||
return clear
|
||||
|
||||
def decryptFile(self):
|
||||
self.decrypt_offset = 0
|
||||
bs = self.volume.blockSize
|
||||
for extent in self.extents:
|
||||
for i in xrange(extent.blockCount):
|
||||
lba = extent.startBlock+i
|
||||
data = self.volume.readBlock(lba)
|
||||
if len(data) == bs:
|
||||
clear = self.processBlock(data, lba)
|
||||
self.volume.writeBlock(lba, clear)
|
||||
|
||||
|
||||
class EMFVolume(HFSVolume):
|
||||
def __init__(self, bdev, device_infos, **kwargs):
|
||||
super(EMFVolume,self).__init__(bdev, **kwargs)
|
||||
volumeid = self.volumeID().encode("hex")
|
||||
|
||||
if not device_infos:
|
||||
dirname = os.path.dirname(bdev.filename)
|
||||
device_infos = search_plist(dirname, {"dataVolumeUUID":volumeid})
|
||||
if not device_infos:
|
||||
raise Exception("Missing keyfile")
|
||||
try:
|
||||
self.emfkey = None
|
||||
if device_infos.has_key("EMF"):
|
||||
self.emfkey = device_infos["EMF"].decode("hex")
|
||||
self.lbaoffset = device_infos["dataVolumeOffset"]
|
||||
self.keybag = Keybag.createWithPlist(device_infos)
|
||||
except:
|
||||
raise #Exception("Invalid keyfile")
|
||||
|
||||
rootxattr = self.getXattr(kHFSRootParentID, "com.apple.system.cprotect")
|
||||
self.decrypted = (self.header.finderInfo[3] == FLAG_DECRYPTED)
|
||||
self.cp_major_version = None
|
||||
self.cp_root = None
|
||||
if rootxattr == None:
|
||||
print "(No root com.apple.system.cprotect xattr)"
|
||||
else:
|
||||
self.cp_root = cp_root_xattr.parse(rootxattr)
|
||||
ver = self.cp_root.major_version
|
||||
print "cprotect version : %d (iOS %d)" % (ver, 4 + int(ver != 2))
|
||||
assert self.cp_root.major_version == 2 or self.cp_root.major_version == 4
|
||||
self.cp_major_version = self.cp_root.major_version
|
||||
self.keybag = loadKeybagFromVolume(self, device_infos)
|
||||
|
||||
def ivForLBA(self, lba, add=True):
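# Per-block IV used by the EMF layer: offset the LBA into the whole disk, then run
# four rounds of the 0x80000061 LFSR over the 32-bit value, packing each round
# little-endian into the 16-byte IV.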
|
||||
iv = ""
|
||||
if add:
|
||||
lba = lba + self.lbaoffset
|
||||
lba &= 0xffffffff
|
||||
for _ in xrange(4):
|
||||
if (lba & 1):
|
||||
lba = 0x80000061 ^ (lba >> 1)
|
||||
else:
|
||||
lba = lba >> 1
|
||||
iv += struct.pack("<L", lba)
|
||||
return iv
|
||||
|
||||
def getFileKeyForCprotect(self, cp):
|
||||
if self.cp_major_version == None:
|
||||
self.cp_major_version = struct.unpack("<H", cp[:2])[0]
|
||||
cprotect = cprotect_xattr.parse(cp)
|
||||
return self.keybag.unwrapKeyForClass(cprotect.persistent_class, cprotect.persistent_key)
|
||||
|
||||
def getFileKeyForFileId(self, fileid):
|
||||
cprotect = self.getXattr(fileid, "com.apple.system.cprotect")
|
||||
if cprotect == None:
|
||||
return None
|
||||
return self.getFileKeyForCprotect(cprotect)
|
||||
|
||||
def readFile(self, path, outFolder="./", returnString=False):
|
||||
k,v = self.catalogTree.getRecordFromPath(path)
|
||||
if not v:
|
||||
print "File %s not found" % path
|
||||
return
|
||||
assert v.recordType == kHFSPlusFileRecord
|
||||
cprotect = self.getXattr(v.data.fileID, "com.apple.system.cprotect")
|
||||
if cprotect == None or not self.cp_root or self.decrypted:
|
||||
#print "cprotect attr not found, reading normally"
|
||||
return super(EMFVolume, self).readFile(path, returnString=returnString)
|
||||
filekey = self.getFileKeyForCprotect(cprotect)
|
||||
if not filekey:
|
||||
print "Cannot unwrap file key for file %s protection_class=%d" % (path, cprotect_xattr.parse(cprotect).persistent_class)
|
||||
return
|
||||
f = EMFFile(self, v.data.dataFork, v.data.fileID, filekey)
|
||||
if returnString:
|
||||
return f.readAllBuffer()
|
||||
f.readAll(outFolder + os.path.basename(path))
|
||||
return True
|
||||
|
||||
def flagVolume(self, flag):
|
||||
self.header.finderInfo[3] = flag
|
||||
h = HFSPlusVolumeHeader.build(self.header)
|
||||
return self.bdev.write(0x400, h)
|
||||
|
||||
def decryptAllFiles(self):
|
||||
if self.header.finderInfo[3] == FLAG_DECRYPTING:
|
||||
print "Volume is half-decrypted, aborting (finderInfo[3] == FLAG_DECRYPTING)"
|
||||
return
|
||||
elif self.header.finderInfo[3] == FLAG_DECRYPTED:
|
||||
print "Volume already decrypted (finderInfo[3] == FLAG_DECRYPTED)"
|
||||
return
|
||||
self.failedToGetKey = []
|
||||
self.notEncrypted = []
|
||||
self.decryptedCount = 0
|
||||
self.flagVolume(FLAG_DECRYPTING)
|
||||
self.catalogTree.traverseLeafNodes(callback=self.decryptFile)
|
||||
self.flagVolume(FLAG_DECRYPTED)
|
||||
print "Decrypted %d files" % self.decryptedCount
|
||||
print "Failed to unwrap keys for : ", self.failedToGetKey
|
||||
print "Not encrypted files : %d" % len(self.notEncrypted)
|
||||
|
||||
def decryptFile(self, k,v):
|
||||
if v.recordType == kHFSPlusFileRecord:
|
||||
filename = getString(k).encode("utf-8")
|
||||
cprotect = self.getXattr(v.data.fileID, "com.apple.system.cprotect")
|
||||
if not cprotect:
|
||||
self.notEncrypted.append(filename)
|
||||
return
|
||||
fk = self.getFileKeyForCprotect(cprotect)
|
||||
if not fk:
|
||||
self.failedToGetKey.append(filename)
|
||||
return
|
||||
print "Decrypting", filename
|
||||
f = EMFFile(self, v.data.dataFork, v.data.fileID, fk)
|
||||
f.decryptFile()
|
||||
self.decryptedCount += 1
|
||||
|
||||
def list_protected_files(self):
|
||||
self.protected_dict = {}
|
||||
self.xattrTree.traverseLeafNodes(callback=self.inspectXattr)
|
||||
for k in self.protected_dict.keys():
|
||||
print k
|
||||
for v in self.protected_dict[k]: print "\t",v
|
||||
print ""
|
||||
|
||||
def inspectXattr(self, k, v):
|
||||
if getString(k) == "com.apple.system.cprotect" and k.fileID != kHFSRootParentID:
|
||||
c = cprotect_xattr.parse(v.data)
|
||||
if c.persistent_class != NSProtectionNone:
|
||||
#desc = "%d %s" % (k.fileID, self.getFullPath(k.fileID))
|
||||
desc = "%s" % self.getFullPath(k.fileID)
|
||||
self.protected_dict.setdefault(PROTECTION_CLASSES.get(c.persistent_class),[]).append(desc)
|
||||
#print k.fileID, self.getFullPath(k.fileID), PROTECTION_CLASSES.get(c.persistent_class)
|
315
dump-imessages/iphone-dataprotection/python_scripts/hfs/hfs.py
Normal file
@ -0,0 +1,315 @@
|
||||
from btree import AttributesTree, CatalogTree, ExtentsOverflowTree
|
||||
from structs import *
|
||||
from util import write_file
|
||||
from util.bdev import FileBlockDevice
|
||||
import datetime
|
||||
import hashlib
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
import zlib
|
||||
|
||||
def hfs_date(t):
|
||||
return datetime.datetime(1904,1,1) + datetime.timedelta(seconds=t)
|
||||
|
||||
class HFSFile(object):
|
||||
def __init__(self, volume, hfsplusfork, fileID, deleted=False):
|
||||
self.volume = volume
|
||||
self.blockSize = volume.blockSize
|
||||
self.fileID = fileID
|
||||
self.totalBlocks = hfsplusfork.totalBlocks
|
||||
self.logicalSize = hfsplusfork.logicalSize
|
||||
self.extents = volume.getAllExtents(hfsplusfork, fileID)
|
||||
self.deleted = deleted
|
||||
|
||||
def readAll(self, outputfile, truncate=True):
|
||||
f = open(outputfile, "wb")
|
||||
for i in xrange(self.totalBlocks):
|
||||
f.write(self.readBlock(i))
|
||||
if truncate:
|
||||
f.truncate(self.logicalSize)
|
||||
f.close()
|
||||
|
||||
def readAllBuffer(self, truncate=True):
|
||||
r = ""
|
||||
for i in xrange(self.totalBlocks):
|
||||
r += self.readBlock(i)
|
||||
if truncate:
|
||||
r = r[:self.logicalSize]
|
||||
return r
|
||||
|
||||
def processBlock(self, block, lba):
|
||||
return block
|
||||
|
||||
def readBlock(self, n):
|
||||
bs = self.volume.blockSize
|
||||
if n*bs > self.logicalSize:
|
||||
return "BLOCK OUT OF BOUNDS" + "\xFF" * (bs - len("BLOCK OUT OF BOUNDS"))
|
||||
bc = 0
|
||||
for extent in self.extents:
|
||||
bc += extent.blockCount
|
||||
if n < bc:
|
||||
lba = extent.startBlock+(n-(bc-extent.blockCount))
|
||||
if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
|
||||
print "FAIL, block %x not marked as used" % n
|
||||
return self.processBlock(self.volume.readBlock(lba), lba)
|
||||
return ""
|
||||
|
||||
def getLBAforBlock(self, n):
|
||||
bc = 0
|
||||
for extent in self.extents:
|
||||
bc += extent.blockCount
|
||||
if n < bc:
|
||||
return extent.startBlock+(n-(bc-extent.blockCount))
|
||||
|
||||
def writeBlock(self, n, data):
|
||||
bs = self.volume.blockSize
|
||||
if n*bs > self.logicalSize:
|
||||
raise Exception("writeBlock, out of bounds %d" % n)
|
||||
bc = 0
|
||||
for extent in self.extents:
|
||||
bc += extent.blockCount
|
||||
if n < bc:
|
||||
lba = extent.startBlock+(n-(bc-extent.blockCount))
|
||||
self.volume.writeBlock(lba, data)
|
||||
return
|
||||
|
||||
|
||||
class HFSCompressedResourceFork(HFSFile):
|
||||
def __init__(self, volume, hfsplusfork, fileID):
|
||||
super(HFSCompressedResourceFork,self).__init__(volume, hfsplusfork, fileID)
|
||||
block0 = self.readBlock(0)
|
||||
self.header = HFSPlusCmpfRsrcHead.parse(block0)
|
||||
print self.header
|
||||
self.blocks = HFSPlusCmpfRsrcBlockHead.parse(block0[self.header.headerSize:])
|
||||
print "HFSCompressedResourceFork numBlocks:", self.blocks.numBlocks
|
||||
|
||||
#HAX, readblock not implemented
|
||||
def readAllBuffer(self):
|
||||
buff = super(HFSCompressedResourceFork, self).readAllBuffer()
|
||||
r = ""
|
||||
base = self.header.headerSize + 4
|
||||
for b in self.blocks.HFSPlusCmpfRsrcBlock:
|
||||
r += zlib.decompress(buff[base+b.offset:base+b.offset+b.size])
|
||||
return r
|
||||
|
||||
class HFSVolume(object):
|
||||
def __init__(self, bdev):
|
||||
self.bdev = bdev
|
||||
|
||||
try:
|
||||
data = self.bdev.readBlock(0)
|
||||
self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
|
||||
assert self.header.signature == 0x4858 or self.header.signature == 0x482B
|
||||
except:
|
||||
raise
|
||||
#raise Exception("Not an HFS+ image")
|
||||
|
||||
self.blockSize = self.header.blockSize
|
||||
self.bdev.setBlockSize(self.blockSize)
|
||||
|
||||
#if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
|
||||
# print "WARNING: HFS image appears to be truncated"
|
||||
|
||||
self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
|
||||
self.allocationBitmap = self.allocationFile.readAllBuffer()
|
||||
self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
|
||||
self.extentsTree = ExtentsOverflowTree(self.extentsFile)
|
||||
self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
|
||||
self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
|
||||
self.catalogTree = CatalogTree(self.catalogFile, self)
|
||||
self.xattrTree = AttributesTree(self.xattrFile)
|
||||
|
||||
self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)
|
||||
|
||||
def readBlock(self, b):
|
||||
return self.bdev.readBlock(b)
|
||||
|
||||
def writeBlock(self, lba, data):
|
||||
return self.bdev.writeBlock(lba, data)
|
||||
|
||||
def volumeID(self):
|
||||
return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])
|
||||
|
||||
def isBlockInUse(self, block):
|
||||
thisByte = ord(self.allocationBitmap[block / 8])
|
||||
return (thisByte & (1 << (7 - (block % 8)))) != 0
|
||||
|
||||
def unallocatedBlocks(self):
|
||||
for i in xrange(self.header.totalBlocks):
|
||||
if not self.isBlockInUse(i):
|
||||
yield i, self.read(i*self.blockSize, self.blockSize)
|
||||
|
||||
def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
|
||||
return self.extentsTree.searchExtents(fileID, forkType, startBlock)
|
||||
|
||||
def getXattr(self, fileID, name):
|
||||
return self.xattrTree.searchXattr(fileID, name)
|
||||
|
||||
def getFileByPath(self, path):
|
||||
return self.catalogTree.getRecordFromPath(path)
|
||||
|
||||
def getFileIDByPath(self, path):
|
||||
key, record = self.catalogTree.getRecordFromPath(path)
|
||||
if not record:
|
||||
return
|
||||
if record.recordType == kHFSPlusFolderRecord:
|
||||
return record.data.folderID
|
||||
return record.data.fileID
|
||||
|
||||
def listFolderContents(self, path):
|
||||
k,v = self.catalogTree.getRecordFromPath(path)
|
||||
if not k or v.recordType != kHFSPlusFolderRecord:
|
||||
return
|
||||
for k,v in self.catalogTree.getFolderContents(v.data.folderID):
|
||||
if v.recordType == kHFSPlusFolderRecord:
|
||||
#.HFS+ Private Directory Data\r
|
||||
print v.data.folderID, getString(k).replace("\r","") + "/"
|
||||
elif v.recordType == kHFSPlusFileRecord:
|
||||
print v.data.fileID, getString(k)
|
||||
|
||||
def ls(self, path):
|
||||
k,v = self.catalogTree.getRecordFromPath(path)
|
||||
return self._ls(k, v)
|
||||
|
||||
def _ls(self, k, v):
|
||||
res = {}
|
||||
|
||||
if not k or v.recordType != kHFSPlusFolderRecord:
|
||||
return None
|
||||
for k,v in self.catalogTree.getFolderContents(v.data.folderID):
|
||||
if v.recordType == kHFSPlusFolderRecord:
|
||||
#.HFS+ Private Directory Data\r
|
||||
res[getString(k).replace("\r","") + "/"] = v.data
|
||||
elif v.recordType == kHFSPlusFileRecord:
|
||||
res[getString(k)] = v.data
|
||||
return res
|
||||
|
||||
def listXattrs(self, path):
|
||||
k,v = self.catalogTree.getRecordFromPath(path)
|
||||
if k and v.recordType == kHFSPlusFileRecord:
|
||||
return self.xattrTree.getAllXattrs(v.data.fileID)
|
||||
elif k and v.recordType == kHFSPlusFolderThreadRecord:
|
||||
return self.xattrTree.getAllXattrs(v.data.folderID)
|
||||
|
||||
def readFileByRecord(self, record):
|
||||
assert record.recordType == kHFSPlusFileRecord
|
||||
xattr = self.getXattr(record.data.fileID, "com.apple.decmpfs")
|
||||
data = None
|
||||
if xattr:
|
||||
decmpfs = HFSPlusDecmpfs.parse(xattr)
|
||||
if decmpfs.compression_type == 1:
|
||||
return xattr[16:]
|
||||
elif decmpfs.compression_type == 3:
|
||||
if decmpfs.uncompressed_size == len(xattr) - 16:
|
||||
return xattr[16:]
|
||||
return zlib.decompress(xattr[16:])
|
||||
elif decmpfs.compression_type == 4:
|
||||
f = HFSCompressedResourceFork(self, record.data.resourceFork, record.data.fileID)
|
||||
data = f.readAllBuffer()
|
||||
return data
|
||||
|
||||
f = HFSFile(self, record.data.dataFork, record.data.fileID)
|
||||
return f.readAllBuffer()
|
||||
|
||||
#TODO: returnString compress
|
||||
def readFile(self, path, outFolder="./", returnString=False):
|
||||
k,v = self.catalogTree.getRecordFromPath(path)
|
||||
if not v:
|
||||
print "File %s not found" % path
|
||||
return
|
||||
assert v.recordType == kHFSPlusFileRecord
|
||||
xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
|
||||
if xattr:
|
||||
decmpfs = HFSPlusDecmpfs.parse(xattr)
|
||||
|
||||
if decmpfs.compression_type == 1:
|
||||
return xattr[16:]
|
||||
elif decmpfs.compression_type == 3:
|
||||
if decmpfs.uncompressed_size == len(xattr) - 16:
|
||||
z = xattr[16:]
|
||||
else:
|
||||
z = zlib.decompress(xattr[16:])
|
||||
open(outFolder + os.path.basename(path), "wb").write(z)
|
||||
return
|
||||
elif decmpfs.compression_type == 4:
|
||||
f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID)
|
||||
z = f.readAllBuffer()
|
||||
open(outFolder + os.path.basename(path), "wb").write(z)
|
||||
return z
|
||||
|
||||
f = HFSFile(self, v.data.dataFork, v.data.fileID)
|
||||
if returnString:
|
||||
return f.readAllBuffer()
|
||||
else:
|
||||
f.readAll(outFolder + os.path.basename(path))
|
||||
|
||||
def readJournal(self):
|
||||
#jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
|
||||
#jib = JournalInfoBlock.parse(jb)
|
||||
#return self.read(jib.offset,jib.size)
|
||||
return self.readFile("/.journal", returnString=True)
|
||||
|
||||
def listAllFileIds(self):
|
||||
self.fileids={}
|
||||
self.catalogTree.traverseLeafNodes(callback=self.grabFileId)
|
||||
return self.fileids
|
||||
|
||||
def grabFileId(self, k,v):
|
||||
if v.recordType == kHFSPlusFileRecord:
|
||||
self.fileids[v.data.fileID] = True
|
||||
|
||||
def getFileRecordForFileID(self, fileID):
|
||||
k,v = self.catalogTree.searchByCNID(fileID)
|
||||
return v
|
||||
|
||||
def getFullPath(self, fileID):
|
||||
k,v = self.catalogTree.search((fileID, ""))
|
||||
if not k:
|
||||
print "File ID %d not found" % fileID
|
||||
return ""
|
||||
p = getString(v.data)
|
||||
while k:
|
||||
k,v = self.catalogTree.search((v.data.parentID, ""))
|
||||
if k.parentID == kHFSRootFolderID:
|
||||
break
|
||||
p = getString(v.data) + "/" + p
|
||||
|
||||
return "/" + p
|
||||
|
||||
def getFileRecordForPath(self, path):
|
||||
k,v = self.catalogTree.getRecordFromPath(path)
|
||||
if not k:
|
||||
return
|
||||
return v.data
|
||||
|
||||
def getAllExtents(self, hfsplusfork, fileID):
|
||||
b = 0
|
||||
extents = []
|
||||
for extent in hfsplusfork.HFSPlusExtentDescriptor:
|
||||
extents.append(extent)
|
||||
b += extent.blockCount
|
||||
while b != hfsplusfork.totalBlocks:
|
||||
k,v = self.getExtentsOverflowForFile(fileID, b)
|
||||
if not v:
|
||||
print "extents overflow missing, startblock=%d" % b
|
||||
break
|
||||
for extent in v:
|
||||
extents.append(extent)
|
||||
b += extent.blockCount
|
||||
return extents
|
||||
|
||||
def dohashFiles(self, k,v):
|
||||
if v.recordType == kHFSPlusFileRecord:
|
||||
filename = getString(k)
|
||||
f = HFSFile(self, v.data.dataFork, v.data.fileID)
|
||||
print filename, hashlib.sha1(f.readAllBuffer()).hexdigest()
|
||||
|
||||
def hashFiles(self):
|
||||
self.catalogTree.traverseLeafNodes(callback=self.dohashFiles)
|
||||
|
||||
if __name__ == "__main__":
|
||||
v = HFSVolume("myramdisk.dmg",offset=0x40)
|
||||
v.listFolderContents("/")
|
||||
print v.readFile("/usr/local/share/restore/imeisv_svn.plist")
|
||||
print v.listXattrs("/usr/local/share/restore/imeisv_svn.plist")
|
@ -0,0 +1,152 @@
|
||||
from crypto.aes import AESencryptCBC, AESdecryptCBC
|
||||
from emf import cprotect_xattr, EMFFile
|
||||
from structs import *
|
||||
from util import write_file, sizeof_fmt
|
||||
import hashlib
|
||||
|
||||
"""
|
||||
Implementation of the following paper :
|
||||
Using the HFS+ Journal For Deleted File Recovery. Aaron Burghardt, Adam Feldman. DFRWS 2008
|
||||
http://www.dfrws.org/2008/proceedings/p76-burghardt.pdf
|
||||
http://www.dfrws.org/2008/proceedings/p76-burghardt_pres.pdf
|
||||
"""
|
||||
|
||||
def carveBtreeNode(node, kClass, dClass):
|
||||
try:
|
||||
btnode = BTNodeDescriptor.parse(node)
|
||||
|
||||
if btnode.kind == kBTLeafNode:
|
||||
off = BTNodeDescriptor.sizeof()
|
||||
recs = []
|
||||
offsets = Array(btnode.numRecords, UBInt16("off")).parse(node[-2*btnode.numRecords:])
|
||||
for i in xrange(btnode.numRecords):
|
||||
off = offsets[btnode.numRecords-i-1]
|
||||
k = kClass.parse(node[off:])
|
||||
off += 2 + k.keyLength
|
||||
d = dClass.parse(node[off:])
|
||||
recs.append((k,d))
|
||||
return recs
|
||||
return []
|
||||
except:
|
||||
return []
|
||||
|
||||
"""
|
||||
for standard HFS volumes
|
||||
"""
|
||||
def carveHFSVolumeJournal(volume):
|
||||
journal = volume.readJournal()
|
||||
hdr = journal_header.parse(journal)
|
||||
sector_size = hdr.jhdr_size
|
||||
nodeSize = volume.catalogTree.nodeSize
|
||||
|
||||
f={}
|
||||
for i in xrange(0,len(journal), sector_size):
|
||||
for k,v in carveBtreeNode(journal[i:i+nodeSize],HFSPlusCatalogKey, HFSPlusCatalogData):
|
||||
if v.recordType == kHFSPlusFileRecord:
|
||||
name = getString(k)
|
||||
h = hashlib.sha1(HFSPlusCatalogKey.build(k)).digest()
|
||||
if f.has_key(h):
|
||||
continue
|
||||
if volume.catalogTree.searchByCNID(v.data.fileID) == (None, None):
|
||||
if volume.isBlockInUse(v.data.dataFork.HFSPlusExtentDescriptor[0].startBlock) == False:
|
||||
print "deleted file", v.data.fileID, name
|
||||
fileid = v.data.fileID
|
||||
f[h]=(name, v)
|
||||
return f.values()
|
||||
|
||||
|
||||
magics=["SQLite", "bplist", "<?xml", "\xFF\xD8\xFF", "\xCE\xFA\xED\xFE", "\x89PNG", "\x00\x00\x00\x1CftypM4A",
|
||||
"\x00\x00\x00\x14ftypqt"]
|
||||
"""
|
||||
HAX: should do something better like compute entropy or something
|
||||
"""
|
||||
def isDecryptedCorrectly(data):
|
||||
for m in magics:
|
||||
if data.startswith(m):
|
||||
return True
|
||||
return False
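# Added sketch of the "compute entropy" idea mentioned above; this helper is
# not part of the original tool and nothing here calls it. Correctly decrypted
# plaintext (plists, SQLite pages, JPEG/PNG headers) usually has much lower
# byte entropy than AES output, so a cutoff around 7.5 bits per byte is a
# crude but workable discriminator. The name and threshold are assumptions.
import math

def looksDecryptedByEntropy(data, threshold=7.5):
    if not data:
        return False
    counts = [0] * 256
    for c in data:
        counts[ord(c)] += 1
    total = float(len(data))
    entropy = 0.0
    for n in counts:
        if n:
            p = n / total
            entropy -= p * math.log(p, 2)
    return entropy < threshold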
|
||||
|
||||
"""
|
||||
carve the journal for deleted cprotect xattrs and file records
|
||||
"""
|
||||
def carveEMFVolumeJournal(volume):
|
||||
journal = volume.readJournal()
|
||||
print "Journal size : %s" % sizeof_fmt(len(journal))
|
||||
hdr = journal_header.parse(journal)
|
||||
sector_size = hdr.jhdr_size
|
||||
nodeSize = volume.catalogTree.nodeSize
|
||||
print "Collecting existing file ids"
|
||||
fileIds = volume.listAllFileIds()
|
||||
print "%d file IDs" % len(fileIds.keys())
|
||||
files = {}
|
||||
keys = {}
|
||||
|
||||
for i in xrange(0,len(journal),sector_size):
|
||||
for k,v in carveBtreeNode(journal[i:i+nodeSize],HFSPlusCatalogKey, HFSPlusCatalogData):
|
||||
if v.recordType == kHFSPlusFileRecord:
|
||||
name = getString(k)
|
||||
h = hashlib.sha1(HFSPlusCatalogKey.build(k)).digest()
|
||||
if files.has_key(h):
|
||||
continue
|
||||
if not fileIds.has_key(v.data.fileID):
|
||||
#we only keep files where the first block is not marked as in use
|
||||
if volume.isBlockInUse(v.data.dataFork.HFSPlusExtentDescriptor[0].startBlock) == False:
|
||||
print "Found deleted file record", v.data.fileID, name
|
||||
files[h] = (name,v)
|
||||
for k,v in carveBtreeNode(journal[i:i+nodeSize],HFSPlusAttrKey, HFSPlusAttrData):
|
||||
if getString(k) == "com.apple.system.cprotect":
|
||||
if not fileIds.has_key(k.fileID):
|
||||
filekeys = keys.setdefault(k.fileID, [])
|
||||
try:
|
||||
cprotect = cprotect_xattr.parse(v.data)
|
||||
except:
|
||||
continue
|
||||
#assert cprotect.xattr_major_version == 2
|
||||
filekey = volume.keybag.unwrapKeyForClass(cprotect.persistent_class, cprotect.persistent_key)
|
||||
if filekey and not filekey in filekeys:
|
||||
print "Found key for file", k.fileID
|
||||
filekeys.append(filekey)
|
||||
|
||||
return files.values(), keys
|
||||
|
||||
"""
|
||||
"bruteforce" method, tries to decrypt all unallocated blocks with provided file keys
|
||||
this is a hack, don't expect interesting results with this
|
||||
"""
|
||||
def carveEMFemptySpace(volume, file_keys, outdir):
|
||||
for lba, block in volume.unallocatedBlocks():
|
||||
iv = volume.ivForLBA(lba)
|
||||
for filekey in file_keys:
|
||||
ciphertext = AESencryptCBC(block, volume.emfkey, iv)
|
||||
clear = AESdecryptCBC(ciphertext, filekey, iv)
|
||||
if isDecryptedCorrectly(clear):
|
||||
print "Decrypted stuff at lba %x" % lba
|
||||
open(outdir+ "/%x.bin" % lba, "wb").write(clear)
|
||||
|
||||
|
||||
def do_emf_carving(volume, carveokdir, carvenokdir):
|
||||
deletedFiles, filekeys = carveEMFVolumeJournal(volume)
|
||||
|
||||
print "Journal carving done, trying to extract deleted files"
|
||||
n = 0
|
||||
for name, vv in deletedFiles:
|
||||
for filekey in filekeys.get(vv.data.fileID, []):
|
||||
ff = EMFFile(volume,vv.data.dataFork, vv.data.fileID, filekey, deleted=True)
|
||||
data = ff.readAllBuffer()
|
||||
if isDecryptedCorrectly(data):
|
||||
write_file(carveokdir + "%d_%s" % (vv.data.fileID, name.replace("/","_")),data)
|
||||
n += 1
|
||||
else:
|
||||
write_file(carvenokdir + "%d_%s" % (vv.data.fileID, name.replace("/","_")),data)
|
||||
if not filekeys.has_key(vv.data.fileID):
|
||||
print "Missing file key for", name
|
||||
else:
|
||||
del filekeys[vv.data.fileID]
|
||||
|
||||
print "Done, extracted %d files" % n
|
||||
|
||||
if False:
|
||||
fks = set(reduce(lambda x,y: x+y, filekeys.values()))
|
||||
print "%d file keys left, try carving empty space (slow) ? CTRL-C to exit" % len(fks)
|
||||
raw_input()
|
||||
carveEMFemptySpace(volume, fks)
|
@ -0,0 +1,341 @@
|
||||
from construct import *
|
||||
from construct.macros import UBInt64
|
||||
"""
|
||||
http://developer.apple.com/library/mac/#technotes/tn/tn1150.html
|
||||
"""
|
||||
|
||||
def getString(obj):
|
||||
return obj.HFSUniStr255.unicode
|
||||
|
||||
S_IFLNK = 0120000
|
||||
kSymLinkFileType = 0x736C6E6B
|
||||
kSymLinkCreator = 0x72686170
|
||||
kHardLinkFileType = 0x686C6E6B
|
||||
kHFSPlusCreator = 0x6866732B
|
||||
|
||||
kHFSCaseFolding = 0xCF
|
||||
kHFSBinaryCompare = 0xBC
|
||||
|
||||
|
||||
def is_symlink(rec):
|
||||
return rec.FileInfo.fileCreator == kSymLinkCreator and rec.FileInfo.fileType == kSymLinkFileType
|
||||
|
||||
kHFSRootParentID = 1
|
||||
kHFSRootFolderID = 2
|
||||
kHFSExtentsFileID = 3
|
||||
kHFSCatalogFileID = 4
|
||||
kHFSBadBlockFileID = 5
|
||||
kHFSAllocationFileID = 6
|
||||
kHFSStartupFileID = 7
|
||||
kHFSAttributesFileID = 8
|
||||
kHFSRepairCatalogFileID = 14
|
||||
kHFSBogusExtentFileID = 15
|
||||
kHFSFirstUserCatalogNodeID = 16
|
||||
|
||||
kBTLeafNode = -1
|
||||
kBTIndexNode = 0
|
||||
kBTHeaderNode = 1
|
||||
kBTMapNode = 2
|
||||
|
||||
kHFSPlusFolderRecord = 0x0001
|
||||
kHFSPlusFileRecord = 0x0002
|
||||
kHFSPlusFolderThreadRecord = 0x0003
|
||||
kHFSPlusFileThreadRecord = 0x0004
|
||||
|
||||
kHFSPlusAttrInlineData = 0x10
|
||||
kHFSPlusAttrForkData = 0x20
|
||||
kHFSPlusAttrExtents = 0x30
|
||||
|
||||
kForkTypeData = 0
|
||||
kForkTypeRsrc = 0xFF
|
||||
|
||||
kHFSVolumeHardwareLockBit = 7
|
||||
kHFSVolumeUnmountedBit = 8
|
||||
kHFSVolumeSparedBlocksBit = 9
|
||||
kHFSVolumeNoCacheRequiredBit = 10
|
||||
kHFSBootVolumeInconsistentBit = 11
|
||||
kHFSCatalogNodeIDsReusedBit = 12
|
||||
kHFSVolumeJournaledBit = 13
|
||||
kHFSVolumeSoftwareLockBit = 15
|
||||
|
||||
DECMPFS_MAGIC = 0x636d7066 #cmpf
|
||||
|
||||
HFSPlusExtentDescriptor = Struct("HFSPlusExtentDescriptor",
|
||||
UBInt32("startBlock"),
|
||||
UBInt32("blockCount")
|
||||
)
|
||||
HFSPlusExtentRecord = Array(8,HFSPlusExtentDescriptor)
|
||||
|
||||
HFSPlusForkData = Struct("HFSPlusForkData",
|
||||
UBInt64("logicalSize"),
|
||||
UBInt32("clumpSize"),
|
||||
UBInt32("totalBlocks"),
|
||||
Array(8, HFSPlusExtentDescriptor)
|
||||
)
|
||||
|
||||
HFSPlusVolumeHeader= Struct("HFSPlusVolumeHeader",
|
||||
UBInt16("signature"),
|
||||
UBInt16("version"),
|
||||
UBInt32("attributes"),
|
||||
UBInt32("lastMountedVersion"),
|
||||
UBInt32("journalInfoBlock"),
|
||||
UBInt32("createDate"),
|
||||
UBInt32("modifyDate"),
|
||||
UBInt32("backupDate"),
|
||||
UBInt32("checkedDate"),
|
||||
UBInt32("fileCount"),
|
||||
UBInt32("folderCount"),
|
||||
UBInt32("blockSize"),
|
||||
UBInt32("totalBlocks"),
|
||||
UBInt32("freeBlocks"),
|
||||
UBInt32("nextAllocation"),
|
||||
UBInt32("rsrcClumpSize"),
|
||||
UBInt32("dataClumpSize"),
|
||||
UBInt32("nextCatalogID"),
|
||||
UBInt32("writeCount"),
|
||||
UBInt64("encodingsBitmap"),
|
||||
|
||||
Array(8, UBInt32("finderInfo")),
|
||||
|
||||
Struct("allocationFile", Embed(HFSPlusForkData)),
|
||||
Struct("extentsFile", Embed(HFSPlusForkData)),
|
||||
Struct("catalogFile", Embed(HFSPlusForkData)),
|
||||
Struct("attributesFile", Embed(HFSPlusForkData)),
|
||||
Struct("startupFile", Embed(HFSPlusForkData)),
|
||||
)
|
||||
|
||||
BTNodeDescriptor = Struct("BTNodeDescriptor",
|
||||
UBInt32("fLink"),
|
||||
UBInt32("bLink"),
|
||||
SBInt8("kind"),
|
||||
UBInt8("height"),
|
||||
UBInt16("numRecords"),
|
||||
UBInt16("reserved")
|
||||
)
|
||||
|
||||
BTHeaderRec = Struct("BTHeaderRec",
|
||||
UBInt16("treeDepth"),
|
||||
UBInt32("rootNode"),
|
||||
UBInt32("leafRecords"),
|
||||
UBInt32("firstLeafNode"),
|
||||
UBInt32("lastLeafNode"),
|
||||
UBInt16("nodeSize"),
|
||||
UBInt16("maxKeyLength"),
|
||||
UBInt32("totalNodes"),
|
||||
UBInt32("freeNodes"),
|
||||
UBInt16("reserved1"),
|
||||
UBInt32("clumpSize"),
|
||||
UBInt8("btreeType"),
|
||||
UBInt8("keyCompareType"),
|
||||
UBInt32("attributes"),
|
||||
Array(16, UBInt32("reserved3"))
|
||||
)
|
||||
|
||||
HFSUniStr255 = Struct("HFSUniStr255",
|
||||
UBInt16("length"),
|
||||
String("unicode", lambda ctx: ctx["length"] * 2, encoding="utf-16-be")
|
||||
)
|
||||
|
||||
HFSPlusAttrKey = Struct("HFSPlusAttrKey",
|
||||
UBInt16("keyLength"),
|
||||
UBInt16("pad"),
|
||||
UBInt32("fileID"),
|
||||
UBInt32("startBlock"),
|
||||
HFSUniStr255,
|
||||
#UBInt32("nodeNumber")
|
||||
)
|
||||
|
||||
HFSPlusAttrData = Struct("HFSPlusAttrData",
|
||||
UBInt32("recordType"),
|
||||
Array(2, UBInt32("reserved")),
|
||||
UBInt32("size"),
|
||||
MetaField("data", lambda ctx: ctx["size"])
|
||||
)
|
||||
|
||||
HFSPlusCatalogKey = Struct("HFSPlusCatalogKey",
|
||||
UBInt16("keyLength"),
|
||||
UBInt32("parentID"),
|
||||
HFSUniStr255
|
||||
)
|
||||
|
||||
HFSPlusBSDInfo = Struct("HFSPlusBSDInfo",
|
||||
UBInt32("ownerID"),
|
||||
UBInt32("groupID"),
|
||||
UBInt8("adminFlags"),
|
||||
UBInt8("ownerFlags"),
|
||||
UBInt16("fileMode"),
|
||||
UBInt32("union_special")
|
||||
)
|
||||
|
||||
Point = Struct("Point",
|
||||
SBInt16("v"),
|
||||
SBInt16("h")
|
||||
)
|
||||
Rect = Struct("Rect",
|
||||
SBInt16("top"),
|
||||
SBInt16("left"),
|
||||
SBInt16("bottom"),
|
||||
SBInt16("right")
|
||||
)
|
||||
FileInfo = Struct("FileInfo",
|
||||
UBInt32("fileType"),
|
||||
UBInt32("fileCreator"),
|
||||
UBInt16("finderFlags"),
|
||||
Point,
|
||||
UBInt16("reservedField")
|
||||
)
|
||||
ExtendedFileInfo = Struct("ExtendedFileInfo",
|
||||
Array(4, SBInt16("reserved1")),
|
||||
UBInt16("extendedFinderFlags"),
|
||||
SBInt16("reserved2"),
|
||||
SBInt32("putAwayFolderID")
|
||||
)
|
||||
|
||||
FolderInfo = Struct("FolderInfo",
|
||||
Rect,
|
||||
UBInt16("finderFlags"),
|
||||
Point,
|
||||
UBInt16("reservedField")
|
||||
)
|
||||
|
||||
ExtendedFolderInfo = Struct("ExtendedFolderInfo",
|
||||
Point,
|
||||
SBInt32("reserved1"),
|
||||
UBInt16("extendedFinderFlags"),
|
||||
SBInt16("reserved2"),
|
||||
SBInt32("putAwayFolderID")
|
||||
)
|
||||
|
||||
HFSPlusCatalogFolder = Struct("HFSPlusCatalogFolder",
|
||||
UBInt16("flags"),
|
||||
UBInt32("valence"),
|
||||
UBInt32("folderID"),
|
||||
UBInt32("createDate"),
|
||||
UBInt32("contentModDate"),
|
||||
UBInt32("attributeModDate"),
|
||||
UBInt32("accessDate"),
|
||||
UBInt32("backupDate"),
|
||||
HFSPlusBSDInfo,
|
||||
FolderInfo,
|
||||
ExtendedFolderInfo,
|
||||
UBInt32("textEncoding"),
|
||||
UBInt32("reserved")
|
||||
)
|
||||
|
||||
HFSPlusCatalogFile = Struct("HFSPlusCatalogFile",
|
||||
UBInt16("flags"),
|
||||
UBInt32("reserved1"),
|
||||
UBInt32("fileID"),
|
||||
UBInt32("createDate"),
|
||||
UBInt32("contentModDate"),
|
||||
UBInt32("attributeModDate"),
|
||||
UBInt32("accessDate"),
|
||||
UBInt32("backupDate"),
|
||||
HFSPlusBSDInfo,
|
||||
FileInfo,
|
||||
ExtendedFileInfo,
|
||||
UBInt32("textEncoding"),
|
||||
UBInt32("reserved2"),
|
||||
Struct("dataFork", Embed(HFSPlusForkData)),
|
||||
Struct("resourceFork", Embed(HFSPlusForkData))
|
||||
)
|
||||
|
||||
HFSPlusCatalogThread = Struct("HFSPlusCatalogThread",
|
||||
SBInt16("reserved"),
|
||||
UBInt32("parentID"),
|
||||
HFSUniStr255,
|
||||
)
|
||||
|
||||
HFSPlusCatalogData = Struct("HFSPlusCatalogData",
|
||||
UBInt16("recordType"),
|
||||
Switch("data", lambda ctx: ctx["recordType"],
|
||||
{
|
||||
kHFSPlusFolderRecord : HFSPlusCatalogFolder,
|
||||
kHFSPlusFileRecord : HFSPlusCatalogFile,
|
||||
kHFSPlusFolderThreadRecord: HFSPlusCatalogThread,
|
||||
kHFSPlusFileThreadRecord: HFSPlusCatalogThread
|
||||
},
|
||||
default=HFSPlusCatalogFolder #XXX: should not reach
|
||||
)
|
||||
)
|
||||
|
||||
HFSPlusExtentKey = Struct("HFSPlusExtentKey",
|
||||
UBInt16("keyLength"),
|
||||
UBInt8("forkType"),
|
||||
UBInt8("pad"),
|
||||
UBInt32("fileID"),
|
||||
UBInt32("startBlock")
|
||||
)
|
||||
|
||||
HFSPlusDecmpfs = Struct("HFSPlusDecmpfs",
|
||||
ULInt32("compression_magic"),
|
||||
ULInt32("compression_type"),
|
||||
ULInt64("uncompressed_size"),
|
||||
)
|
||||
|
||||
HFSPlusCmpfRsrcHead = Struct("HFSPlusCmpfRsrcHead",
|
||||
UBInt32("headerSize"),
|
||||
UBInt32("totalSize"),
|
||||
UBInt32("dataSize"),
|
||||
UBInt32("flags")
|
||||
)
|
||||
|
||||
HFSPlusCmpfRsrcBlock = Struct("HFSPlusCmpfRsrcBlock",
|
||||
ULInt32("offset"),
|
||||
ULInt32("size")
|
||||
)
|
||||
|
||||
HFSPlusCmpfRsrcBlockHead = Struct("HFSPlusCmpfRsrcBlockHead",
|
||||
UBInt32("dataSize"),
|
||||
ULInt32("numBlocks"),
|
||||
Array(lambda ctx:ctx["numBlocks"], HFSPlusCmpfRsrcBlock)
|
||||
)
|
||||
|
||||
HFSPlusCmpfEnd = Struct("HFSPlusCmpfEnd",
|
||||
Array(6, UBInt32("pad")),
|
||||
UBInt16("unk1"),
|
||||
UBInt16("unk2"),
|
||||
UBInt16("unk3"),
|
||||
UBInt32("magic"),
|
||||
UBInt32("flags"),
|
||||
UBInt64("size"),
|
||||
UBInt32("unk4")
|
||||
)
|
||||
|
||||
|
||||
"""
|
||||
Journal stuff
|
||||
"""
|
||||
JournalInfoBlock = Struct("JournalInfoBlock",
|
||||
UBInt32("flags"),
|
||||
Array(8, UBInt32("device_signature")),
|
||||
UBInt64("offset"),
|
||||
UBInt64("size"),
|
||||
Array(32, UBInt32("reserved"))
|
||||
)
|
||||
|
||||
journal_header = Struct("journal_header",
|
||||
ULInt32("magic"),
|
||||
ULInt32("endian"),
|
||||
ULInt64("start"),
|
||||
ULInt64("end"),
|
||||
ULInt64("size"),
|
||||
ULInt32("blhdr_size"),
|
||||
ULInt32("checksum"),
|
||||
ULInt32("jhdr_size")
|
||||
)
|
||||
|
||||
block_info = Struct("block_info",
|
||||
ULInt64("bnum"),
|
||||
ULInt32("bsize"),
|
||||
ULInt32("next")
|
||||
)
|
||||
|
||||
block_list_header = Struct("block_list_header",
|
||||
ULInt16("max_blocks"),
|
||||
ULInt16("num_blocks"),
|
||||
ULInt32("bytes_used"),
|
||||
SLInt32("checksum"),
|
||||
UBInt32("pad"),
|
||||
Array(lambda ctx:ctx["num_blocks"], block_info)
|
||||
)
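# Added example (hedged, not in the original file): these construct Structs
# are meant to be applied to raw on-disk bytes. For instance, the volume
# header lives 1024 bytes into the partition (the file name is an assumption):
#
#     raw = open("disk.dmg", "rb").read(0x400 + 0x200)
#     hdr = HFSPlusVolumeHeader.parse(raw[0x400:])
#     assert hdr.signature == 0x482B      # "H+"
#     print hdr.blockSize, hdr.totalBlocks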
|
@ -0,0 +1,366 @@
|
||||
from cmd import Cmd
|
||||
from firmware.img3 import Img3
|
||||
from hfs.emf import cprotect_xattr, PROTECTION_CLASSES
|
||||
from hfs.hfs import hfs_date
|
||||
from keystore.keybag import Keybag, PROTECTION_CLASSES
|
||||
from nand.carver import NANDCarver
|
||||
from nand.nand import NAND
|
||||
from optparse import OptionParser
|
||||
from util import hexdump, makedirs, write_file, parsePlist, sizeof_fmt,\
|
||||
readPlist
|
||||
from util.bruteforce import bruteforcePasscode
|
||||
from util.ramdiskclient import RamdiskToolClient
|
||||
import os
|
||||
import plistlib
|
||||
import struct
|
||||
from pprint import pprint
|
||||
from keychain import keychain_load
|
||||
from nand.remote import IOFlashStorageKitClient
|
||||
|
||||
DEVICES_NAMES = {"m68ap": "iPhone 2G",
|
||||
"n82ap": "iPhone 3G",
|
||||
"n88ap": "iPhone 3GS",
|
||||
"n90ap": "iPhone 4 GSM",
|
||||
"n92ap": "iPhone 4 CDMA",
|
||||
"n72ap": "iPod Touch 2G",
|
||||
"n18ap": "iPod Touch 3G",
|
||||
"n81ap": "iPod Touch 4G",
|
||||
"k48ap": "iPad 1",
|
||||
}
|
||||
|
||||
def print_device_infos(d):
|
||||
print "Device model:", DEVICES_NAMES.get(d["hwModel"].lower(), d["hwModel"])
|
||||
print "UDID:", d["udid"]
|
||||
print "ECID:", d.get("ECID")
|
||||
print "Serial number:", d["serialNumber"]
|
||||
for k in ["key835", "key89B"]:
|
||||
if d.has_key(k): print "%s: %s" % (k, d[k])
|
||||
|
||||
def grab_system_version(system, device_infos):
|
||||
SystemVersion = system.readFile("/System/Library/CoreServices/SystemVersion.plist", returnString=True)
|
||||
if SystemVersion:
|
||||
SystemVersion = plistlib.readPlistFromString(SystemVersion)
|
||||
print "iOS version: ", SystemVersion.get("ProductVersion")
|
||||
|
||||
def get_device_name(dataVolume):
|
||||
preferences = dataVolume.readFile("/preferences/SystemConfiguration/preferences.plist", returnString=True)
|
||||
if preferences:
|
||||
preferences = parsePlist(preferences)
|
||||
return preferences.get("System", {}).get("System", {}).get("ComputerName", "[device name found]")
|
||||
return "[device name not found]"
|
||||
|
||||
def jailbreak_check(system):
|
||||
#lazy jailbreak check
|
||||
binsh = system.readFile("/bin/sh",returnString=True)
|
||||
if binsh:
|
||||
print "Device is probably jailbroken"
|
||||
#fstab = system.readFile("/etc/fstab",returnString=True)
|
||||
#XXX follow symlinks
|
||||
#if fstab.count("rw") != 1:
|
||||
# print "Device is probably jailbroken"
|
||||
|
||||
def check_kernel(system, device_infos):
|
||||
kernel = system.readFile("/System/Library/Caches/com.apple.kernelcaches/kernelcache",returnString=True)
|
||||
if not kernel: return
|
||||
k3 = Img3("kernel", kernel)
|
||||
if k3.sigcheck(device_infos.get("key89A","").decode("hex")):
|
||||
print "Kernel signature check OK"
|
||||
if kernel[0x40:0x50].startswith("complzss"):
|
||||
print "Kernel is decrypted, probably jailbroken with redsn0w/pwnage tool"
|
||||
|
||||
class ExaminerShell(Cmd):
|
||||
def __init__(self, image, completekey='tab', stdin=None, stdout=None):
|
||||
Cmd.__init__(self, completekey=completekey, stdin=stdin, stdout=stdout)
|
||||
self.curdir = "/"
|
||||
self.rdisk = None
|
||||
if image.filename == "remote":
|
||||
self.rdisk = RamdiskToolClient.get()
|
||||
self.device_infos = image.device_infos
|
||||
self.complete_open = self._complete
|
||||
self.complete_xattr = self._complete
|
||||
self.complete_cprotect = self._complete
|
||||
self.complete_ls = self._complete
|
||||
self.complete_cd = self._complete
|
||||
self.complete_plist = self._complete
|
||||
self.complete_xxd = self._complete
|
||||
self.image = image
|
||||
self.system = image.getPartitionVolume(0)
|
||||
self.data = image.getPartitionVolume(1)
|
||||
self.volume = None
|
||||
self.volname = ""
|
||||
grab_system_version(self.system, self.device_infos)
|
||||
print "Keybag state: %slocked" % (int(self.data.keybag.unlocked) * "un")
|
||||
self.deviceName = get_device_name(self.data)
|
||||
self.do_data("")
|
||||
self.savepath = os.path.join(os.path.dirname(image.filename), "%s.plist" % self.device_infos.udid[:10])
|
||||
#if image.iosVersion > 3 and not image.device_infos.has_key("passcode"):
|
||||
# print "No passcode found in plist file, bruteforce required to access protected data"
|
||||
|
||||
self.carver = None
|
||||
|
||||
def set_partition(self, name, vol):
|
||||
self.volume = vol
|
||||
self.do_cd("/")
|
||||
self.volname = name
|
||||
self.prompt = "(%s-%s) %s " % (self.deviceName, self.volname, self.curdir)
|
||||
|
||||
def do_info(self, p):
|
||||
pprint(self.device_infos)
|
||||
|
||||
def do_save(self, p):
|
||||
print "Save device information plist to [%s]:" % self.savepath,
|
||||
path2 = raw_input()
|
||||
if path2: self.savepath = path2
|
||||
if os.path.exists(self.savepath):
|
||||
print "File already exists, overwrite ? [y/n]:",
|
||||
if raw_input() != "y":
|
||||
return
|
||||
plistlib.writePlist(self.device_infos, self.savepath)
|
||||
|
||||
def do_system(self, p):
|
||||
self.set_partition("system", self.system)
|
||||
|
||||
def do_data(self, p):
|
||||
self.set_partition("data", self.data)
|
||||
|
||||
def do_pix(self, p):
|
||||
self.do_data("")
|
||||
self.do_cd("/mobile/Media/DCIM/100APPLE")
|
||||
|
||||
def do_keychain(self, p):
|
||||
self.data.readFile("/Keychains/keychain-2.db")
|
||||
keychain = keychain_load("keychain-2.db", self.data.keybag, self.image.device_infos["key835"].decode("hex"))
|
||||
keychain.print_all(False)
|
||||
|
||||
def do_keychain_cert(self, p):
|
||||
t = p.split()
|
||||
id = int(t[0])
|
||||
if len(t) == 2: filename = t[1]
|
||||
else: filename = ""
|
||||
keychain = keychain_load("keychain-2.db", self.data.keybag, self.image.device_infos["key835"].decode("hex"))
|
||||
keychain.cert(id, filename)
|
||||
|
||||
def do_keychain_key(self, p):
|
||||
t = p.split()
|
||||
id = int(t[0])
|
||||
if len(t) == 2: filename = t[1]
|
||||
else: filename = ""
|
||||
keychain = keychain_load("keychain-2.db", self.data.keybag, self.image.device_infos["key835"].decode("hex"))
|
||||
keychain.key(id, filename)
|
||||
|
||||
def do_exit(self, p):
|
||||
return True
|
||||
|
||||
def do_quit(self, p):
|
||||
return self.do_exit(p)
|
||||
|
||||
def do_reboot(self, p):
|
||||
if not self.rdisk:
|
||||
self.rdisk = RamdiskToolClient.get()
|
||||
self.rdisk.reboot()
|
||||
return self.do_exit(p)
|
||||
|
||||
def do_pwd(self, p):
|
||||
print self.curdir
|
||||
|
||||
def do_cd(self, p):
|
||||
if len(p) == 0: p = "/"
|
||||
if not p.startswith("/"):
|
||||
new = self.curdir + p
|
||||
else:
|
||||
new = p
|
||||
if not p.endswith("/"): new = new + "/"
|
||||
d = self.volume.ls(new)
|
||||
if d != None:
|
||||
self.curdir = new
|
||||
self.prompt = "(%s-%s) %s " % (self.deviceName, self.volname, new)
|
||||
else:
|
||||
print "%s not found/is not a directory" % new
|
||||
|
||||
def get_path(self, p):
|
||||
path = p
|
||||
if not path.startswith("/"):
|
||||
path = self.curdir + path
|
||||
return path
|
||||
|
||||
def _complete(self, text, line, begidx, endidx):
|
||||
filename = text.split("/")[-1]
|
||||
dirname = "/".join(text.split("/")[:-1])
|
||||
if text.startswith("/"):
|
||||
contents = self.volume.ls(dirname)
|
||||
else:
|
||||
contents = self.volume.ls(self.curdir + dirname)
|
||||
if not contents:
|
||||
return []
|
||||
if dirname != "" and not dirname.endswith("/"):
|
||||
dirname += "/"
|
||||
res = [dirname + x for x in contents.keys() if x.startswith(filename)]
|
||||
return res
|
||||
|
||||
#TODO if size==0 check if compressed
|
||||
def do_ls(self, p):
|
||||
dirDict = self.volume.ls((self.curdir + "/" + p).replace("//","/"))
|
||||
if not dirDict:
|
||||
return
|
||||
for name in sorted(dirDict.keys()):
|
||||
size = ""
|
||||
protection_class = ""
|
||||
record = dirDict[name]
|
||||
if hasattr(record, "fileID"):
|
||||
size = sizeof_fmt(record.dataFork.logicalSize)
|
||||
cprotect = self.volume.getXattr(record.fileID, "com.apple.system.cprotect")
|
||||
if cprotect:
|
||||
protection_class = PROTECTION_CLASSES[struct.unpack("<L", cprotect[8:12])[0]]
|
||||
print "%s\t%s\t%s\t%s" % (name[:30].ljust(30), size.ljust(10), hfs_date(record.createDate), protection_class)
|
||||
|
||||
def do_undelete(self, p):
|
||||
if not self.data.keybag.unlocked:
|
||||
print "Warning, keybag is not unlocked, some files will be inaccessible"
|
||||
if not self.carver:
|
||||
self.carver = NANDCarver(self.data, self.image)
|
||||
if False:#len(p):
|
||||
z = self.volume.catalogTree.getLBAsHax()
|
||||
v = self.volume.getFileRecordForPath(self.curdir)
|
||||
folderId = v.folderID
|
||||
f = lambda k,v: k.parentID == folderId
|
||||
else:
|
||||
z = None
|
||||
f = None
|
||||
self.carver.carveDeletedFiles_fast(z, f)
|
||||
#self.carver.carveDeleteFiles_slow(z, f)
|
||||
|
||||
def do_xattr(self, p):
|
||||
xattr = self.volume.listXattrs(self.get_path(p))
|
||||
if not xattr:
|
||||
return
|
||||
for name, value in xattr.items():
|
||||
print name, value.encode("hex")
|
||||
|
||||
def do_protected_files(self, p):
|
||||
self.data.list_protected_files()
|
||||
|
||||
def do_cprotect(self, p):
|
||||
id = self.volume.getFileIDByPath(self.get_path(p))
|
||||
if not id:
|
||||
return
|
||||
|
||||
cprotect = self.volume.getXattr(id, "com.apple.system.cprotect")
|
||||
if not cprotect:
|
||||
return
|
||||
cp = cprotect_xattr.parse(cprotect)
|
||||
print cp
|
||||
print "Protection class %d => %s" % (cp.persistent_class, PROTECTION_CLASSES.get(cp.persistent_class))
|
||||
if not cp.persistent_key:
|
||||
return
|
||||
fk = self.volume.getFileKeyForCprotect(cprotect)
|
||||
if fk:
|
||||
print "Unwrapped file key : %s" % fk.encode("hex")
|
||||
else:
|
||||
print "Cannot decrypt file key"
|
||||
|
||||
|
||||
def do_open(self, p):
|
||||
path = self.get_path(p)
|
||||
if self.volume.readFile(path):
|
||||
os.startfile(os.path.basename(path))
|
||||
|
||||
def do_xxd(self, p):
|
||||
t = p.split()
|
||||
path = self.get_path(t[0])
|
||||
data = self.volume.readFile(path, returnString=True)
|
||||
if not data:
|
||||
return
|
||||
if len(t) > 1:
|
||||
hexdump(data[:int(t[1])])
|
||||
else:
|
||||
hexdump(data)
|
||||
|
||||
def do_effaceable(self, p):
|
||||
print "Effaceable Lockers"
|
||||
for k,v in self.image.lockers.lockers.items():
|
||||
print "%s: %s" % (k, v.encode("hex"))
|
||||
|
||||
def do_BAG1(self, p):
|
||||
print "BAG1 locker from effaceable storage"
|
||||
bag1 = self.image.lockers.get("BAG1")
|
||||
hexdump(bag1)
|
||||
print "IV:", bag1[4:20].encode("hex")
|
||||
print "Key:", bag1[20:].encode("hex")
|
||||
|
||||
def do_keybag(self, p):
|
||||
self.data.keybag.printClassKeys()
|
||||
|
||||
def do_plist(self, p):
|
||||
d = None
|
||||
data = self.volume.readFile(self.get_path(p), returnString=True)
|
||||
if data:
|
||||
d = parsePlist(data)
|
||||
pprint(d)
|
||||
else:
|
||||
try:
|
||||
d = readPlist(p)
|
||||
if d: pprint(d)
|
||||
except:
|
||||
pass
|
||||
if d and d.has_key("_MKBIV"):
|
||||
print "=>System keybag file"
|
||||
print "_MKBPAYLOAD: encrypted"
|
||||
print "_MKBIV: %s" % d["_MKBIV"].data.encode("hex")
|
||||
print "_MKBWIPEID: 0x%x (%s)" % (d["_MKBWIPEID"], ("%x"%(d["_MKBWIPEID"])).decode("hex"))
|
||||
|
||||
def do_bruteforce(self, p):
|
||||
if bruteforcePasscode(self.image.device_infos, self.data):
|
||||
print "Keybag state: %slocked" % (int(self.data.keybag.unlocked) * "un")
|
||||
self.do_save("")
|
||||
|
||||
def do_ptable(self, p):
|
||||
pt = self.image.getPartitionTable()
|
||||
print "Block device partition table"
|
||||
print "".join(map(lambda x:x.ljust(12), ["Index", "Name", "Start LBA", "End LBA", "Size"]))
|
||||
for i in xrange(len(pt)):
|
||||
p = pt[i]
|
||||
print "".join(map(lambda x:str(x).ljust(12), [i, p.name, p.first_lba, p.last_lba, sizeof_fmt((p.last_lba - p.first_lba)*self.image.pageSize)]))
|
||||
|
||||
def do_nand_dump(self, p):
|
||||
if len(p)==0:
|
||||
print "Usage: nand_dump my_nand.bin"
|
||||
return
|
||||
self.image.dump(p)
|
||||
|
||||
def do_dd(self, p):
|
||||
if len(p)==0:
|
||||
print "Usage: dd output_file.dmg"
|
||||
return
|
||||
self.volume.bdev.dumpToFile(p.split()[0])
|
||||
|
||||
def do_img3(self, p):
|
||||
self.image.extract_img3s()
|
||||
|
||||
def do_shsh(self, p):
|
||||
self.image.extract_shsh()
|
||||
|
||||
def do_debug(self,p):
|
||||
from IPython.Shell import IPShellEmbed
|
||||
ipshell = IPShellEmbed()
|
||||
ipshell(local_ns=locals())
|
||||
|
||||
def main():
|
||||
parser = OptionParser(usage="%prog [options] nand_image.bin device_infos.plist")
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if len(args) >= 2:
|
||||
plistname = args[1]
|
||||
nandimagename = args[0]
|
||||
device_infos = plistlib.readPlist(plistname)
|
||||
print "Loading device information from %s" % plistname
|
||||
else:
|
||||
nandimagename ="remote"
|
||||
client = RamdiskToolClient.get()
|
||||
device_infos = client.device_infos
|
||||
print_device_infos(device_infos)
|
||||
image = NAND(nandimagename, device_infos)
|
||||
|
||||
ExaminerShell(image).cmdloop("")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
224
dump-imessages/iphone-dataprotection/python_scripts/kernel_patcher.py
Executable file
@ -0,0 +1,224 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import plistlib
|
||||
import zipfile
|
||||
import struct
|
||||
import sys
|
||||
from optparse import OptionParser
|
||||
from Crypto.Cipher import AES
|
||||
from util.lzss import decompress_lzss
|
||||
|
||||
devices = {"n82ap": "iPhone1,2",
|
||||
"n88ap": "iPhone2,1",
|
||||
"n90ap": "iPhone3,1",
|
||||
"n90bap": "iPhone3,2",
|
||||
"n92ap": "iPhone3,3",
|
||||
"n18ap": "iPod3,1",
|
||||
"n81ap": "iPod4,1",
|
||||
"k48ap": "iPad1,1",
|
||||
"n72ap": "iPod2,1",
|
||||
}
|
||||
|
||||
h=lambda x:x.replace(" ","").decode("hex")
|
||||
|
||||
#thx to 0x56
|
||||
patchs_ios6 = {
|
||||
"IOAESAccelerator enable UID" : (h("B0 F5 FA 6F 00 F0 92 80"), h("B0 F5 FA 6F 00 20 00 20")),
|
||||
"_PE_i_can_has_debugger" : (h("80 B1 43 F2 BE 01 C0 F2"), h("01 20 70 47 BE 01 C0 F2")),
|
||||
}
|
||||
|
||||
#https://github.com/comex/datautils0/blob/master/make_kernel_patchfile.c
|
||||
patchs_ios5 = {
|
||||
"CSED" : (h("df f8 88 33 1d ee 90 0f a2 6a 1b 68"), h("df f8 88 33 1d ee 90 0f a2 6a 01 23")),
|
||||
"AMFI" : (h("D0 47 01 21 40 B1 13 35"), h("00 20 01 21 40 B1 13 35")),
|
||||
"_PE_i_can_has_debugger" : (h("38 B1 05 49 09 68 00 29"), h("01 20 70 47 09 68 00 29")),
|
||||
"task_for_pid_0" : (h("00 21 02 91 ba f1 00 0f 01 91 06 d1 02 a8"), h("00 21 02 91 ba f1 00 0f 01 91 06 e0 02 a8")),
|
||||
"IOAESAccelerator enable UID" : (h("67 D0 40 F6"), h("00 20 40 F6")),
|
||||
#not strictly required, useful for testing
|
||||
"getxattr system": ("com.apple.system.\x00", "com.apple.aaaaaa.\x00"),
|
||||
"IOAES gid": (h("40 46 D4 F8 54 43 A0 47"), h("40 46 D4 F8 43 A0 00 20")),
|
||||
#HAX to fit into the 40 char boot-args (redsn0w 0.9.10)
|
||||
"nand-disable-driver": ("nand-disable-driver\x00", "nand-disable\x00\x00\x00\x00\x00\x00\x00\x00")
|
||||
}
|
||||
|
||||
patchs_ios4 = {
|
||||
"NAND_epoch" : ("\x90\x47\x83\x45", "\x90\x47\x00\x20"),
|
||||
"CSED" : ("\x00\x00\x00\x00\x01\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00", "\x01\x00\x00\x00\x01\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00"),
|
||||
"AMFI" : ("\x01\xD1\x01\x30\x04\xE0\x02\xDB", "\x00\x20\x01\x30\x04\xE0\x02\xDB"),
|
||||
"_PE_i_can_has_debugger" : (h("48 B1 06 4A 13 68 13 B9"), h("01 20 70 47 13 68 13 B9")),
|
||||
"IOAESAccelerator enable UID" : ("\x56\xD0\x40\xF6", "\x00\x00\x40\xF6"),
|
||||
"getxattr system": ("com.apple.system.\x00", "com.apple.aaaaaa.\x00"),
|
||||
}
|
||||
|
||||
patchs_armv6 = {
|
||||
"NAND_epoch" : (h("00 00 5B E1 0E 00 00 0A"), h("00 00 5B E1 0E 00 00 EA")),
|
||||
"CSED" : (h("00 00 00 00 01 00 00 00 80 00 00 00 00 00 00 00"), h("01 00 00 00 01 00 00 00 80 00 00 00 00 00 00 00")),
|
||||
"AMFI" : (h("00 00 00 0A 00 40 A0 E3 04 00 A0 E1 90 80 BD E8"), h("00 00 00 0A 00 40 A0 E3 01 00 A0 E3 90 80 BD E8")),
|
||||
"_PE_i_can_has_debugger" : (h("00 28 0B D0 07 4A 13 68 00 2B 02 D1 03 60 10 68"), h("01 20 70 47 07 4A 13 68 00 2B 02 D1 03 60 10 68")),
|
||||
"IOAESAccelerator enable UID" : (h("5D D0 36 4B 9A 42"), h("00 20 36 4B 9A 42")),
|
||||
"IOAES gid": (h("FA 23 9B 00 9A 42 05 D1"), h("00 20 00 20 9A 42 05 D1")),
|
||||
"nand-disable-driver": ("nand-disable-driver\x00", "nand-disable\x00\x00\x00\x00\x00\x00\x00\x00"),
|
||||
}
|
||||
patchs_ios4_fixnand = {
|
||||
"Please reboot => jump to prepare signature": (h("B0 47 DF F8 E8 04 F3 E1"), h("B0 47 DF F8 E8 04 1D E0")),
|
||||
"prepare signature => jump to write signature": (h("10 43 18 60 DF F8 AC 04"), h("10 43 18 60 05 E1 00 20")),
|
||||
"check write ok => infinite loop" : (h("A3 48 B0 47 01 24"), h("A3 48 B0 47 FE E7"))
|
||||
}
|
||||
|
||||
#grab keys from redsn0w Keys.plist
|
||||
class IPSWkeys(object):
|
||||
def __init__(self, manifest):
|
||||
self.keys = {}
|
||||
buildi = manifest["BuildIdentities"][0]
|
||||
dc = buildi["Info"]["DeviceClass"]
|
||||
build = "%s_%s_%s" % (devices.get(dc,dc), manifest["ProductVersion"], manifest["ProductBuildVersion"])
|
||||
try:
|
||||
rs = plistlib.readPlist("Keys.plist")
|
||||
except:
|
||||
raise Exception("Get Keys.plist from redsn0w and place it in the current directory")
|
||||
for k in rs["Keys"]:
|
||||
if k["Build"] == build:
|
||||
self.keys = k
|
||||
break
|
||||
|
||||
def getKeyIV(self, filename):
|
||||
if not self.keys.has_key(filename):
|
||||
return None, None
|
||||
k = self.keys[filename]
|
||||
return k.get("Key",""), k.get("IV","")
|
||||
|
||||
def decryptImg3(blob, key, iv):
|
||||
assert blob[:4] == "3gmI", "Img3 magic tag"
|
||||
data = ""
|
||||
for i in xrange(20, len(blob)):
|
||||
tag = blob[i:i+4]
|
||||
size, real_size = struct.unpack("<LL", blob[i+4:i+12])
|
||||
if tag[::-1] == "DATA":
|
||||
assert size >= real_size, "Img3 length check"
|
||||
data = blob[i+12:i+size]
|
||||
break
|
||||
i += size
|
||||
return AES.new(key, AES.MODE_CBC, iv).decrypt(data)[:real_size]
|
||||
|
||||
|
||||
def main(ipswname, options):
|
||||
ipsw = zipfile.ZipFile(ipswname)
|
||||
manifest = plistlib.readPlistFromString(ipsw.read("BuildManifest.plist"))
|
||||
kernelname = manifest["BuildIdentities"][0]["Manifest"]["KernelCache"]["Info"]["Path"]
|
||||
devclass = manifest["BuildIdentities"][0]["Info"]["DeviceClass"]
|
||||
kernel = ipsw.read(kernelname)
|
||||
keys = IPSWkeys(manifest)
|
||||
|
||||
key,iv = keys.getKeyIV(kernelname)
|
||||
|
||||
if key == None:
|
||||
print "No keys found for kernel"
|
||||
return
|
||||
|
||||
print "Decrypting %s" % kernelname
|
||||
kernel = decryptImg3(kernel, key.decode("hex"), iv.decode("hex"))
|
||||
assert kernel.startswith("complzss"), "Decrypted kernelcache does not start with \"complzss\" => bad key/iv ?"
|
||||
|
||||
print "Unpacking ..."
|
||||
kernel = decompress_lzss(kernel)
|
||||
assert kernel.startswith("\xCE\xFA\xED\xFE"), "Decompressed kernelcache does not start with 0xFEEDFACE"
|
||||
|
||||
patchs = patchs_ios5
|
||||
if devclass in ["n82ap", "n72ap"]:
|
||||
print "Using ARMv6 kernel patches"
|
||||
patchs = patchs_armv6
|
||||
elif manifest["ProductVersion"].startswith("4."):
|
||||
print "Using iOS 4 kernel patches"
|
||||
patchs = patchs_ios4
|
||||
elif manifest["ProductVersion"].startswith("6."):
|
||||
print "Using iOS 6 kernel patches"
|
||||
patchs = patchs_ios6
|
||||
|
||||
if options.fixnand:
|
||||
if patchs != patchs_ios4:
|
||||
print "FAIL : use --fixnand with iOS 4.x IPSW"
|
||||
return
|
||||
patchs.update(patchs_ios4_fixnand)
|
||||
kernelname = "fix_nand_" + kernelname
|
||||
print "WARNING : only use this kernel to fix NAND epoch brick"
|
||||
|
||||
for p in patchs:
|
||||
print "Doing %s patch" % p
|
||||
s, r = patchs[p]
|
||||
c = kernel.count(s)
|
||||
if c != 1:
|
||||
print "=> FAIL, count=%d, do not boot that kernel it wont work" % c
|
||||
else:
|
||||
kernel = kernel.replace(s,r)
|
||||
|
||||
outkernel = "%s.patched" % kernelname
|
||||
open(outkernel, "wb").write(kernel)
|
||||
print "Patched kernel written to %s" % outkernel
|
||||
|
||||
ramdiskname = manifest["BuildIdentities"][0]["Manifest"]["RestoreRamDisk"]["Info"]["Path"]
|
||||
key,iv = keys.getKeyIV("Ramdisk")
|
||||
ramdisk = ipsw.read(ramdiskname)
|
||||
|
||||
print "Decrypting %s" % ramdiskname
|
||||
ramdisk = decryptImg3(ramdisk, key.decode("hex"), iv.decode("hex"))
|
||||
|
||||
assert ramdisk[0x400:0x402] == "H+", "H+ magic not found in decrypted ramdisk => bad key/iv ?"
|
||||
|
||||
customramdisk = "myramdisk_%s.dmg" % devclass
|
||||
f = open(customramdisk, "wb")
|
||||
f.write(ramdisk)
|
||||
f.close()
|
||||
|
||||
if manifest["ProductVersion"].startswith("6."):
|
||||
print "Run ./build_ramdisk_ios6.sh %s" % customramdisk
|
||||
print "Then redsn0w -i %s -r %s -k %s -a \"-v rd=md0 amfi=0xff cs_enforcement_disable=1\"" % (ipswname, customramdisk, outkernel)
|
||||
return
|
||||
|
||||
build_cmd = "./build_ramdisk.sh %s %s %s %s %s" % (ipswname, ramdiskname, key, iv, customramdisk)
|
||||
rs_cmd = "redsn0w -i %s -r %s -k %s" % (ipswname, customramdisk, outkernel)
|
||||
rdisk_script="""#!/bin/sh
|
||||
|
||||
for VER in 4.2 4.3 5.0 5.1 6.0
|
||||
do
|
||||
if [ -f "/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$VER.sdk/System/Library/Frameworks/IOKit.framework/IOKit" ];
|
||||
then
|
||||
SDKVER=$VER
|
||||
echo "Found iOS SDK $SDKVER"
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$SDKVER" == "" ]; then
|
||||
echo "iOS SDK not found"
|
||||
exit
|
||||
fi
|
||||
SDKVER=$SDKVER make -C ramdisk_tools
|
||||
|
||||
%s
|
||||
|
||||
if [ "$?" == "0" ]
|
||||
then
|
||||
echo "You can boot the ramdisk using the following command (fix paths)"
|
||||
echo "%s"
|
||||
echo "Add -a \\"-v rd=md0 nand-disable=1\\" for nand dump/read only access"
|
||||
fi
|
||||
""" % (build_cmd, rs_cmd)
|
||||
|
||||
scriptname="make_ramdisk_%s.sh" % devclass
|
||||
f=open(scriptname, "wb")
|
||||
f.write(rdisk_script)
|
||||
f.close()
|
||||
|
||||
print "Created script %s, you can use it to (re)build the ramdisk"% scriptname
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = OptionParser(usage="%prog [options] IPSW")
|
||||
parser.add_option("-f", "--fixnand",
|
||||
action="store_true", dest="fixnand", default=False,
|
||||
help="Apply NAND epoch fix kernel patches")
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
if len(args) < 1:
|
||||
parser.print_help()
|
||||
else:
|
||||
main(args[0], options)
|
@ -0,0 +1,12 @@
|
||||
import sqlite3
|
||||
from keychain3 import Keychain3
|
||||
from keychain4 import Keychain4
|
||||
|
||||
def keychain_load(filename, keybag, key835):
|
||||
version = sqlite3.connect(filename).execute("SELECT version FROM tversion").fetchone()[0]
|
||||
#print "Keychain version : %d" % version
|
||||
if version == 3:
|
||||
return Keychain3(filename, key835)
|
||||
elif version >= 4:
|
||||
return Keychain4(filename, keybag)
|
||||
raise Exception("Unknown keychain version %d" % version)
|
@ -0,0 +1,237 @@
|
||||
from store import PlistKeychain, SQLiteKeychain
|
||||
from util import write_file
|
||||
from util.asciitables import print_table
|
||||
from util.bplist import BPlistReader
|
||||
from util.cert import RSA_KEY_DER_to_PEM, CERT_DER_to_PEM
|
||||
import M2Crypto
|
||||
import hashlib
|
||||
import plistlib
|
||||
import sqlite3
|
||||
import string
|
||||
import struct
|
||||
|
||||
KSECATTRACCESSIBLE = {
|
||||
6: "kSecAttrAccessibleWhenUnlocked",
|
||||
7: "kSecAttrAccessibleAfterFirstUnlock",
|
||||
8: "kSecAttrAccessibleAlways",
|
||||
9: "kSecAttrAccessibleWhenUnlockedThisDeviceOnly",
|
||||
10: "kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly",
|
||||
11: "kSecAttrAccessibleAlwaysThisDeviceOnly"
|
||||
}
|
||||
printset = set(string.printable)
|
||||
|
||||
def render_password(p):
|
||||
data = p["data"]
|
||||
if data != None and data.startswith("bplist") and data.find("\x00") != -1:
|
||||
pl = BPlistReader.plistWithString(p["data"])
|
||||
filename = "%s_%s_%d.plist" % (p["svce"],p["acct"],p["rowid"])
|
||||
plistlib.writePlist(pl, filename)
|
||||
#write_file("bin_"+filename, p["data"])
|
||||
data = filename
|
||||
|
||||
if p.has_key("srvr"):
|
||||
return "%s:%d;%s;%s" % (p["srvr"],p["port"],p["acct"],data)
|
||||
else:
|
||||
return "%s;%s;%s" % (p["svce"],p["acct"],data)
|
||||
|
||||
class Keychain(object):
|
||||
def __init__(self, filename):
|
||||
magic = open(filename, "rb").read(16)
|
||||
if magic.startswith("SQLite"):
|
||||
self.store = SQLiteKeychain(filename)
|
||||
elif magic.startswith("bplist"):
|
||||
self.store = PlistKeychain(filename)
|
||||
else:
|
||||
raise Exception("Unknown keychain format for %s" % filename)
|
||||
self.bsanitize = True
|
||||
self.items = {"genp": None, "inet": None, "cert": None, "keys": None}
|
||||
|
||||
def decrypt_data(self, data):
|
||||
return data #override this method
|
||||
|
||||
def decrypt_item(self, res):
|
||||
res["data"] = self.decrypt_data(res["data"])
|
||||
if not res["data"]:
|
||||
return {}
|
||||
return res
|
||||
|
||||
def get_items(self, table):
|
||||
if self.items[table]:
|
||||
return self.items[table]
|
||||
self.items[table] = filter(lambda x:x!={}, map(self.decrypt_item, self.store.get_items(table)))
|
||||
return self.items[table]
|
||||
|
||||
def get_passwords(self):
|
||||
return self.get_items("genp")
|
||||
|
||||
def get_inet_passwords(self):
|
||||
return self.get_items("inet")
|
||||
|
||||
def get_keys(self):
|
||||
return self.get_items("keys")
|
||||
|
||||
def get_cert(self):
|
||||
return self.get_items("cert")
|
||||
|
||||
def get_certs(self):
|
||||
certs = {}
|
||||
pkeys = {}
|
||||
keys = self.get_keys()
|
||||
for row in self.get_cert():
|
||||
cert = M2Crypto.X509.load_cert_der_string(row["data"])
|
||||
subject = cert.get_subject().as_text()
|
||||
common_name = cert.get_subject().get_entries_by_nid(M2Crypto.X509.X509_Name.nid['CN'])
|
||||
if len(common_name):
|
||||
subject = str(common_name[0].get_data())
|
||||
else:
|
||||
subject = "cn_unknown_%d" % row["rowid"]
|
||||
certs[subject+ "_%s" % row["agrp"]] = cert
|
||||
|
||||
#print subject
|
||||
#print "Access :\t" + KSECATTRACCESSIBLE.get(row["clas"])
|
||||
|
||||
for k in keys:
|
||||
if k["agrp"] == row["agrp"] and k["klbl"] == row["pkhh"]:
|
||||
pkey_der = k["data"]
|
||||
pkey_der = RSA_KEY_DER_to_PEM(pkey_der)
|
||||
pkeys[subject + "_%s" % row["agrp"]] = pkey_der
|
||||
break
|
||||
|
||||
return certs, pkeys
|
||||
|
||||
|
||||
def save_passwords(self):
|
||||
passwords = "\n".join(map(render_password, self.get_passwords()))
|
||||
inetpasswords = "\n".join(map(render_password, self.get_inet_passwords()))
|
||||
print "Writing passwords to keychain.csv"
|
||||
write_file("keychain.csv", "Passwords;;\n"+passwords+"\nInternet passwords;;\n"+ inetpasswords)
|
||||
|
||||
def save_certs_keys(self):
|
||||
certs, pkeys = self.get_certs()
|
||||
for c in certs:
|
||||
filename = c + ".crt"
|
||||
print "Saving certificate %s" % filename
|
||||
certs[c].save_pem(filename)
|
||||
for k in pkeys:
|
||||
filename = k + ".key"
|
||||
print "Saving key %s" % filename
|
||||
write_file(filename, pkeys[k])
|
||||
|
||||
def sanitize(self, pw):
|
||||
if pw.startswith("bplist"):
|
||||
return "<binary plist data>"
|
||||
elif not set(pw).issubset(printset):
|
||||
pw = ">"+ pw.encode("hex")
|
||||
#pw = "<binary data> : " + pw.encode("hex")
|
||||
if self.bsanitize:
|
||||
return pw[:2] + ("*" * (len(pw) - 2))
|
||||
return pw
|
||||
|
||||
def print_all(self, sanitize=True):
|
||||
self.bsanitize = sanitize
|
||||
headers = ["Service", "Account", "Data", "Access group", "Protection class"]
|
||||
rows = []
|
||||
for p in self.get_passwords():
|
||||
row = [p.get("svce","?"),
|
||||
str(p.get("acct","?"))[:40],
|
||||
self.sanitize(p.get("data","?"))[:20],
|
||||
p.get("agrp","?"),
|
||||
KSECATTRACCESSIBLE.get(p["clas"])[18:]]
|
||||
rows.append(row)
|
||||
|
||||
print_table("Passwords", headers, rows)
|
||||
|
||||
headers = ["Server", "Account", "Data", "Access group", "Protection class"]
|
||||
rows = []
|
||||
|
||||
for p in self.get_inet_passwords():
|
||||
addr = "?"
|
||||
if p.has_key("srvr"):
|
||||
addr = p["srvr"] + ":" + str(p["port"])
|
||||
row = [addr,
|
||||
str(p.get("acct","?")),
|
||||
self.sanitize(p.get("data","?"))[:20],
|
||||
p.get("agrp","?"),
|
||||
KSECATTRACCESSIBLE.get(p["clas"])[18:]]
|
||||
rows.append(row)
|
||||
|
||||
print_table("Internet Passwords", headers, rows)
|
||||
|
||||
headers = ["Id", "Common Name", "Access group", "Protection class"]
|
||||
rows = []
|
||||
c = {}
|
||||
|
||||
for row in self.get_cert():
|
||||
subject = "?"
|
||||
if row.has_key("data"):
|
||||
cert = M2Crypto.X509.load_cert_der_string(row["data"])
|
||||
subject = cert.get_subject().as_text()
|
||||
common_name = cert.get_subject().get_entries_by_nid(M2Crypto.X509.X509_Name.nid['CN'])
|
||||
if len(common_name):
|
||||
subject = str(common_name[0].get_data())
|
||||
else:
|
||||
subject = "cn_unknown_%d" % row["rowid"]
|
||||
c[hashlib.sha1(str(row["pkhh"])).hexdigest() + row["agrp"]] = subject
|
||||
row = [str(row["rowid"]),
|
||||
subject[:81],
|
||||
row.get("agrp","?")[:31],
|
||||
KSECATTRACCESSIBLE.get(row["clas"])[18:]
|
||||
]
|
||||
rows.append(row)
|
||||
|
||||
print_table("Certificates", headers, rows)
|
||||
|
||||
headers = ["Id", "Label", "Common Name", "Access group", "Protection class"]
|
||||
rows = []
|
||||
for row in self.get_keys():
|
||||
subject = ""
|
||||
if row.has_key("klbl"):
|
||||
subject = c.get(hashlib.sha1(str(row["klbl"])).hexdigest() + row["agrp"], "")
|
||||
row = [str(row["rowid"]), row.get("labl", "?")[:30], subject[:39], row.get("agrp","?")[:31],
|
||||
KSECATTRACCESSIBLE.get(row["clas"])[18:]]
|
||||
rows.append(row)
|
||||
print_table("Keys", headers, rows)
|
||||
|
||||
def get_push_token(self):
|
||||
for p in self.get_passwords():
|
||||
if p["svce"] == "push.apple.com":
|
||||
return p["data"]
|
||||
|
||||
def get_managed_configuration(self):
|
||||
for p in self.get_passwords():
|
||||
if p["acct"] == "Private" and p["svce"] == "com.apple.managedconfiguration" and p["agrp"] == "apple":
|
||||
return BPlistReader.plistWithString(p["data"])
|
||||
|
||||
def _diff(self, older, res, func, key):
|
||||
res.setdefault(key, [])
|
||||
current = func(self)
|
||||
for p in func(older):
|
||||
if not p in current and not p in res[key]:
|
||||
res[key].append(p)
|
||||
|
||||
def diff(self, older, res):
|
||||
self._diff(older, res, Keychain.get_passwords, "genp")
|
||||
self._diff(older, res, Keychain.get_inet_passwords, "inet")
|
||||
self._diff(older, res, Keychain.get_cert, "cert")
|
||||
self._diff(older, res, Keychain.get_keys, "keys")
|
||||
|
||||
def cert(self, rowid, filename=""):
|
||||
for row in self.get_cert():
|
||||
if row["rowid"] == rowid:
|
||||
blob = CERT_DER_to_PEM(row["data"])
|
||||
if filename:
|
||||
write_file(filename, blob)
|
||||
cert = M2Crypto.X509.load_cert_der_string(row["data"])
|
||||
print cert.as_text()
|
||||
return
|
||||
|
||||
def key(self, rowid, filename=""):
|
||||
for row in self.get_keys():
|
||||
if row["rowid"] == rowid:
|
||||
blob = RSA_KEY_DER_to_PEM(row["data"])
|
||||
if filename:
|
||||
write_file(filename, blob)
|
||||
#k = M2Crypto.RSA.load_key_string(blob)
|
||||
print blob
|
||||
return
|
||||
|
@ -0,0 +1,44 @@
|
||||
from keychain import Keychain
|
||||
from crypto.aes import AESdecryptCBC, AESencryptCBC
|
||||
import hashlib
|
||||
|
||||
class Keychain3(Keychain):
|
||||
def __init__(self, filename, key835=None):
|
||||
Keychain.__init__(self, filename)
|
||||
self.key835 = key835
|
||||
|
||||
def decrypt_data(self, data):
|
||||
if data == None:
|
||||
return ""
|
||||
data = str(data)
|
||||
|
||||
if not self.key835:
|
||||
print "Key 835 not availaible"
|
||||
return ""
|
||||
|
||||
data = AESdecryptCBC(data[16:], self.key835, data[:16], padding=True)
|
||||
|
||||
#data_column = iv + AES128_K835(iv, data + sha1(data))
|
||||
if hashlib.sha1(data[:-20]).digest() != data[-20:]:
|
||||
print "data field hash mismatch : bad key ?"
|
||||
return "ERROR decrypting data : bad key ?"
|
||||
|
||||
return data[:-20]
|
||||
|
||||
def change_key835(self, newkey):
|
||||
tables = {"genp": "SELECT rowid, data FROM genp",
|
||||
"inet": "SELECT rowid, data FROM inet",
|
||||
"cert": "SELECT rowid, data FROM cert",
|
||||
"keys": "SELECT rowid, data FROM keys"}
|
||||
|
||||
for t in tables.keys():
|
||||
for row in self.conn.execute(tables[t]):
|
||||
rowid = row["rowid"]
|
||||
data = str(row["data"])
|
||||
iv = data[:16]
|
||||
data = AESdecryptCBC(data[16:], self.key835, iv)
|
||||
data = AESencryptCBC(data, newkey, iv)
|
||||
data = iv + data
|
||||
data = buffer(data)
|
||||
self.conn.execute("UPDATE %s SET data=? WHERE rowid=?" % t, (data, rowid))
|
||||
self.conn.commit()
|
@ -0,0 +1,92 @@
|
||||
from crypto.aes import AESdecryptCBC
|
||||
import struct
|
||||
|
||||
"""
|
||||
iOS 4 keychain-2.db data column format
|
||||
|
||||
version 0x00000000
|
||||
key class 0x00000008
|
||||
kSecAttrAccessibleWhenUnlocked 6
|
||||
kSecAttrAccessibleAfterFirstUnlock 7
|
||||
kSecAttrAccessibleAlways 8
|
||||
kSecAttrAccessibleWhenUnlockedThisDeviceOnly 9
|
||||
kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly 10
|
||||
kSecAttrAccessibleAlwaysThisDeviceOnly 11
|
||||
wrapped AES256 key 0x28 bytes (passed to kAppleKeyStoreKeyUnwrap)
|
||||
encrypted data (AES 256 CBC zero IV)
|
||||
"""
|
||||
from keychain import Keychain
|
||||
from crypto.gcm import gcm_decrypt
|
||||
from util.bplist import BPlistReader
|
||||
|
||||
KSECATTRACCESSIBLE = {
|
||||
6: "kSecAttrAccessibleWhenUnlocked",
|
||||
7: "kSecAttrAccessibleAfterFirstUnlock",
|
||||
8: "kSecAttrAccessibleAlways",
|
||||
9: "kSecAttrAccessibleWhenUnlockedThisDeviceOnly",
|
||||
10: "kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly",
|
||||
11: "kSecAttrAccessibleAlwaysThisDeviceOnly"
|
||||
}
|
||||
|
||||
class Keychain4(Keychain):
|
||||
def __init__(self, filename, keybag):
|
||||
if not keybag.unlocked:
|
||||
print "Keychain object created with locked keybag, some items won't be decrypted"
|
||||
Keychain.__init__(self, filename)
|
||||
self.keybag = keybag
|
||||
|
||||
def decrypt_item(self, row):
|
||||
version, clas = struct.unpack("<LL", row["data"][0:8])
|
||||
if self.keybag.isBackupKeybag():
|
||||
if clas >= 9 and not self.keybag.deviceKey:
|
||||
return {}
|
||||
if version >= 2:
|
||||
dict = self.decrypt_blob(row["data"])
|
||||
if not dict:
|
||||
return {"clas": clas, "rowid": row["rowid"]}
|
||||
if dict.has_key("v_Data"):
|
||||
dict["data"] = dict["v_Data"].data
|
||||
else:
|
||||
dict["data"] = ""
|
||||
dict["rowid"] = row["rowid"]
|
||||
dict["clas"] = clas
|
||||
return dict
|
||||
row["clas"] = clas
|
||||
return Keychain.decrypt_item(self, row)
|
||||
|
||||
def decrypt_data(self, data):
|
||||
data = self.decrypt_blob(data)
|
||||
if type(data) == dict:
|
||||
return data["v_Data"].data
|
||||
return data
|
||||
|
||||
def decrypt_blob(self, blob):
|
||||
if blob == None:
|
||||
return ""
|
||||
|
||||
if len(blob) < 48:
|
||||
print "keychain blob length must be >= 48"
|
||||
return
|
||||
|
||||
version, clas = struct.unpack("<LL",blob[0:8])
|
||||
self.clas=clas
|
||||
if version == 0:
|
||||
wrappedkey = blob[8:8+40]
|
||||
encrypted_data = blob[48:]
|
||||
elif version == 2:
|
||||
l = struct.unpack("<L",blob[8:12])[0]
|
||||
wrappedkey = blob[12:12+l]
|
||||
encrypted_data = blob[12+l:-16]
|
||||
else:
|
||||
raise Exception("unknown keychain verson ", version)
|
||||
return
|
||||
|
||||
unwrappedkey = self.keybag.unwrapKeyForClass(clas, wrappedkey, False)
|
||||
if not unwrappedkey:
|
||||
return
|
||||
|
||||
if version == 0:
|
||||
return AESdecryptCBC(encrypted_data, unwrappedkey, padding=True)
|
||||
elif version == 2:
|
||||
binaryplist = gcm_decrypt(unwrappedkey, "", encrypted_data, "", blob[-16:])
|
||||
return BPlistReader(binaryplist).parse()
|
@ -0,0 +1,27 @@
|
||||
"""
|
||||
0
|
||||
1:MCSHA256DigestWithSalt
|
||||
2:SecKeyFromPassphraseDataHMACSHA1
|
||||
"""
|
||||
from crypto.PBKDF2 import PBKDF2
|
||||
import plistlib
|
||||
import hashlib
|
||||
|
||||
SALT1 = "F92F024CA2CB9754".decode("hex")
|
||||
|
||||
hashMethods={
|
||||
1: (lambda p,salt:hashlib.sha256(SALT1 + p).digest()),
|
||||
2: (lambda p,salt:PBKDF2(p, salt, iterations=1000).read(20))
|
||||
}
|
||||
|
||||
def bruteforce_old_pass(h):
|
||||
salt = h["salt"].data
|
||||
hash = h["hash"].data
|
||||
f = hashMethods.get(h["hashMethod"])
|
||||
|
||||
if f:
|
||||
print "Bruteforcing hash %s (4 digits)" % hash.encode("hex")
|
||||
for i in xrange(10000):
|
||||
p = "%04d" % (i % 10000)
|
||||
if f(p,salt) == hash:
|
||||
return p
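# Usage note (added): "h" is one entry of the "history" list inside the
# com.apple.managedconfiguration item pulled from the keychain (see
# Keychain.get_managed_configuration and keychain_tool.py); it carries
# "salt", "hash" and "hashMethod" fields as plist Data/integer values.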
|
@ -0,0 +1,56 @@
|
||||
import plistlib
|
||||
import sqlite3
|
||||
import struct
|
||||
from util import readPlist
|
||||
|
||||
class KeychainStore(object):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def convertDict(self, d):
|
||||
return d
|
||||
|
||||
def returnResults(self, r):
|
||||
for a in r:
|
||||
yield self.convertDict(a)
|
||||
|
||||
def get_items(self, table):
|
||||
return []
|
||||
|
||||
class SQLiteKeychain(KeychainStore):
|
||||
def __init__(self, filename):
|
||||
self.conn = sqlite3.connect(filename)
|
||||
self.conn.row_factory = sqlite3.Row
|
||||
|
||||
def convertDict(self, row):
|
||||
d = dict(row)
|
||||
for k,v in d.items():
|
||||
if type(v) == buffer:
|
||||
d[k] = str(v)
|
||||
return d
|
||||
|
||||
def get_items(self, table):
|
||||
sql = {"genp": "SELECT rowid, data, svce, acct, agrp FROM genp",
|
||||
"inet": "SELECT rowid, data, acct, srvr, port, agrp FROM inet",
|
||||
"cert": "SELECT rowid, data, pkhh, agrp FROM cert",
|
||||
"keys": "SELECT rowid, data, klbl, agrp FROM keys"}
|
||||
return self.returnResults(self.conn.execute(sql[table]))
|
||||
|
||||
class PlistKeychain(KeychainStore):
|
||||
def __init__(self, filename):
|
||||
self.plist = readPlist(filename)
|
||||
|
||||
def convertDict(self, d):
|
||||
for k, v in d.items():
|
||||
if isinstance(v, plistlib.Data):
|
||||
if k == "v_Data":
|
||||
d["data"] = v.data
|
||||
elif k == "v_PersistentRef":
|
||||
#format tablename (4 chars) + rowid (64 bits)
|
||||
d["rowid"] = struct.unpack("<Q", v.data[-8:])[0]
|
||||
else:
|
||||
d[k] = v.data
|
||||
return d
|
||||
|
||||
def get_items(self, table):
|
||||
return self.returnResults(self.plist.get(table, []))
|
@ -0,0 +1,72 @@
|
||||
from optparse import OptionParser
|
||||
from keystore.keybag import Keybag
|
||||
from keychain import keychain_load
|
||||
from keychain.managedconfiguration import bruteforce_old_pass
|
||||
from util import readPlist
|
||||
from keychain.keychain4 import Keychain4
|
||||
import plistlib
|
||||
|
||||
def main():
|
||||
parser = OptionParser(usage="%prog keychain.db/keychain-backup.plist keyfile.plist/Manifest.plist")
|
||||
parser.add_option("-d", "--display", dest="display", action="store_true", default=False,
|
||||
help="Show keychain items on stdout")
|
||||
parser.add_option("-s", "--sanitize", dest="sanitize", action="store_true", default=False,
|
||||
help="Hide secrets on stdout with ***")
|
||||
parser.add_option("-p", "--passwords", dest="passwords", action="store_true", default=False,
|
||||
help="Save generic & internet passwords as CSV file")
|
||||
parser.add_option("-c", "--certs", dest="certs", action="store_true", default=False,
|
||||
help="Extract certificates and keys")
|
||||
parser.add_option("-o", "--old", dest="oldpass", action="store_true", default=False,
|
||||
help="Bruteforce old passcodes")
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
if len(args) < 2:
|
||||
parser.print_help()
|
||||
return
|
||||
|
||||
p = readPlist(args[1])
|
||||
|
||||
if p.has_key("BackupKeyBag"):
|
||||
deviceKey = None
|
||||
if p.has_key("key835"):
|
||||
deviceKey = p["key835"].decode("hex")
|
||||
else:
|
||||
if not p["IsEncrypted"]:
|
||||
print "This backup is not encrypted, without key 835 nothing in the keychain can be decrypted"
|
||||
print "If you have key835 for device %s enter it (in hex)" % p["Lockdown"]["UniqueDeviceID"]
|
||||
d = raw_input()
|
||||
if len(d) == 32:
|
||||
p["key835"] = d
|
||||
deviceKey = d.decode("hex")
|
||||
plistlib.writePlist(p, args[1])
|
||||
|
||||
kb = Keybag.createWithBackupManifest(p, p.get("password",""), deviceKey)
|
||||
if not kb:
|
||||
return
|
||||
k = Keychain4(args[0], kb)
|
||||
else:
|
||||
kb = Keybag.createWithPlist(p)
|
||||
k = keychain_load(args[0], kb, p["key835"].decode("hex"))
|
||||
|
||||
if options.display:
|
||||
k.print_all(options.sanitize)
|
||||
if options.passwords:
|
||||
k.save_passwords()
|
||||
if options.certs:
|
||||
k.save_certs_keys()
|
||||
|
||||
if options.oldpass:
|
||||
mc = k.get_managed_configuration()
|
||||
if not mc:
|
||||
print "Managed configuration not found"
|
||||
return
|
||||
print "Bruteforcing %d old passcodes" % len(mc.get("history",[]))
|
||||
for h in mc["history"]:
|
||||
p = bruteforce_old_pass(h)
|
||||
if p:
|
||||
print "Found : %s" % p
|
||||
else:
|
||||
print "Not Found"
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
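# Example invocations matching the usage string above (file names are placeholders):
#   python keychain_tool.py -d keychain.db keyfile.plist
#   python keychain_tool.py -d -p keychain-backup.plist Manifest.plist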
|
@ -0,0 +1,69 @@
|
||||
from construct import RepeatUntil
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
from crypto.aes import AESdecryptCBC
|
||||
from crypto.aeswrap import AESUnwrap
|
||||
from zipfile import crc32
|
||||
import struct
|
||||
|
||||
Dkey = 0x446B6579
|
||||
EMF = 0x454D4621
|
||||
BAG1 = 0x42414731
|
||||
DONE = 0x444f4e45 #locker sentinel
|
||||
#**** = 0x2A2A2A2A #wildcard for erase
|
||||
|
||||
#MAGIC (kL) | LEN (2bytes) | TAG (4) | DATA (LEN)
|
||||
Locker = Struct("Locker",
|
||||
String("magic",2),
|
||||
ULInt16("length"),
|
||||
Union("tag",
|
||||
ULInt32("int"),
|
||||
String("tag",4))
|
||||
,
|
||||
String("data", lambda ctx: ctx["length"])
|
||||
)
|
||||
|
||||
Lockers = RepeatUntil(lambda obj, ctx: obj.tag.int == DONE, Locker)
|
||||
|
||||
def xor_strings(s, key):
|
||||
res = ""
|
||||
for i in xrange(len(s)):
|
||||
res += chr(ord(s[i]) ^ ord(key[i%len(key)]))
|
||||
return res
|
||||
|
||||
def check_effaceable_header(plog):
|
||||
z = xor_strings(plog[:16], plog[16:32])
|
||||
if z[:4] != "ecaF":
|
||||
return False
|
||||
plog_generation = struct.unpack("<L", plog[0x38:0x3C])[0]
|
||||
print "Effaceable generation" , plog_generation
|
||||
plog_crc = crc32(plog[0x40:0x40 + 960], crc32(plog[0x20:0x3C], crc32(z))) & 0xffffffff
|
||||
assert plog_crc == struct.unpack("<L", plog[0x3C:0x40])[0] , "Effaceable CRC"
|
||||
print "Effaceable CRC OK"
|
||||
return True
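# Effaceable area layout as checked above: the first 16 bytes XORed with the next 16 must
# start with "ecaF"; the generation counter is the <L at 0x38; the <L at 0x3C is a CRC32
# chained over the XORed 16 bytes, bytes 0x20:0x3C and the 960-byte locker area starting
# at 0x40 (which is what EffaceableLockers below parses).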
|
||||
|
||||
class EffaceableLockers(object):
|
||||
def __init__(self, data):
|
||||
self.lockers = {}
|
||||
for l in Lockers.parse(data):
|
||||
tag = l.tag.int & ~0x80000000
|
||||
tag = struct.pack("<L", tag)[::-1]
|
||||
self.lockers[tag] = l.data
|
||||
|
||||
def display(self):
|
||||
print "Lockers : " + ", ".join(sorted(self.lockers.keys()))
|
||||
|
||||
def get(self, tag):
|
||||
return self.lockers.get(tag)
|
||||
|
||||
def get_DKey(self, k835):
|
||||
if self.lockers.has_key("Dkey"):
|
||||
return AESUnwrap(k835, self.lockers["Dkey"])
|
||||
|
||||
def get_EMF(self, k89b):
|
||||
if self.lockers.has_key("LwVM"):
|
||||
lwvm = AESdecryptCBC(self.lockers["LwVM"], k89b)
|
||||
return lwvm[-32:]
|
||||
elif self.lockers.has_key("EMF!"):
|
||||
return AESdecryptCBC(self.lockers["EMF!"][4:], k89b)
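# Illustrative use (variable names are placeholders), following what nand.py does with the
# 960-byte locker area and the device keys 0x835 / 0x89B:
#
#   lockers = EffaceableLockers(locker_bytes)
#   lockers.display()
#   dkey = lockers.get_DKey(key835)   # injected as the class 4 / NSFileProtectionNone key
#   emf = lockers.get_EMF(key89b)     # EMF or LwVM data partition key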
|
||||
|
@ -0,0 +1,265 @@
|
||||
from crypto.PBKDF2 import PBKDF2
|
||||
from crypto.aes import AESdecryptCBC
|
||||
from crypto.aeswrap import AESUnwrap
|
||||
from crypto.aeswrap import AESwrap
|
||||
from crypto.curve25519 import curve25519
|
||||
from hashlib import sha256, sha1
|
||||
from util.bplist import BPlistReader
|
||||
from util.tlv import loopTLVBlocks, tlvToDict
|
||||
import hmac
|
||||
import struct
|
||||
|
||||
KEYBAG_TAGS = ["VERS", "TYPE", "UUID", "HMCK", "WRAP", "SALT", "ITER"]
|
||||
CLASSKEY_TAGS = ["CLAS","WRAP","WPKY", "KTYP", "PBKY"] #UUID
|
||||
KEYBAG_TYPES = ["System", "Backup", "Escrow", "OTA (icloud)"]
|
||||
SYSTEM_KEYBAG = 0
|
||||
BACKUP_KEYBAG = 1
|
||||
ESCROW_KEYBAG = 2
|
||||
OTA_KEYBAG = 3
|
||||
|
||||
#ORed flags in TYPE since iOS 5
|
||||
FLAG_UIDPLUS = 0x40000000 # UIDPlus hardware key (>= iPad 3)
|
||||
FLAG_UNKNOWN = 0x80000000
|
||||
|
||||
WRAP_DEVICE = 1
|
||||
WRAP_PASSCODE = 2
|
||||
|
||||
KEY_TYPES = ["AES", "Curve25519"]
|
||||
PROTECTION_CLASSES={
|
||||
1:"NSFileProtectionComplete",
|
||||
2:"NSFileProtectionCompleteUnlessOpen",
|
||||
3:"NSFileProtectionCompleteUntilFirstUserAuthentication",
|
||||
4:"NSFileProtectionNone",
|
||||
5:"NSFileProtectionRecovery?",
|
||||
|
||||
6: "kSecAttrAccessibleWhenUnlocked",
|
||||
7: "kSecAttrAccessibleAfterFirstUnlock",
|
||||
8: "kSecAttrAccessibleAlways",
|
||||
9: "kSecAttrAccessibleWhenUnlockedThisDeviceOnly",
|
||||
10: "kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly",
|
||||
11: "kSecAttrAccessibleAlwaysThisDeviceOnly"
|
||||
}
|
||||
|
||||
"""
|
||||
device key : key 0x835
|
||||
"""
|
||||
class Keybag(object):
|
||||
def __init__(self, data):
|
||||
self.type = None
|
||||
self.uuid = None
|
||||
self.wrap = None
|
||||
self.deviceKey = None
|
||||
self.unlocked = False
|
||||
self.passcodeComplexity = 0
|
||||
self.attrs = {}
|
||||
self.classKeys = {}
|
||||
self.KeyBagKeys = None #DATASIGN blob
|
||||
self.parseBinaryBlob(data)
|
||||
|
||||
@staticmethod
|
||||
def getSystemkbfileWipeID(filename):
|
||||
mkb = BPlistReader.plistWithFile(filename)
|
||||
return mkb["_MKBWIPEID"]
|
||||
|
||||
@staticmethod
|
||||
def createWithPlist(pldict):
|
||||
k835 = pldict.key835.decode("hex")
|
||||
data = ""
|
||||
if pldict.has_key("KeyBagKeys"):
|
||||
data = pldict["KeyBagKeys"].data
|
||||
else:
|
||||
data = ""
|
||||
keybag = Keybag.createWithDataSignBlob(data, k835)
|
||||
|
||||
if pldict.has_key("passcodeKey"):
|
||||
if keybag.unlockWithPasscodeKey(pldict["passcodeKey"].decode("hex")):
|
||||
print "Keybag unlocked with passcode key"
|
||||
else:
|
||||
print "FAILed to unlock keybag with passcode key"
|
||||
#HAX: inject DKey
|
||||
keybag.setDKey(pldict)
|
||||
return keybag
|
||||
|
||||
def setDKey(self, device_infos):
|
||||
self.classKeys[4] = {"CLAS": 4, "KEY": device_infos["DKey"].decode("hex")}
|
||||
|
||||
@staticmethod
|
||||
def createWithSystemkbfile(filename, bag1key, deviceKey=None):
|
||||
if filename.startswith("bplist"): #HAX
|
||||
mkb = BPlistReader.plistWithString(filename)
|
||||
else:
|
||||
mkb = BPlistReader.plistWithFile(filename)
|
||||
try:
|
||||
decryptedPlist = AESdecryptCBC(mkb["_MKBPAYLOAD"].data, bag1key, mkb["_MKBIV"].data, padding=True)
|
||||
except:
|
||||
print "FAIL: AESdecryptCBC _MKBPAYLOAD => wrong BAG1 key ?"
|
||||
return None
|
||||
if not decryptedPlist.startswith("bplist"):
|
||||
print "FAIL: decrypted _MKBPAYLOAD is not bplist"
|
||||
return None
|
||||
decryptedPlist = BPlistReader.plistWithString(decryptedPlist)
|
||||
blob = decryptedPlist["KeyBagKeys"].data
|
||||
kb = Keybag.createWithDataSignBlob(blob, deviceKey)
|
||||
if decryptedPlist.has_key("OpaqueStuff"):
|
||||
OpaqueStuff = BPlistReader.plistWithString(decryptedPlist["OpaqueStuff"].data)
|
||||
kb.passcodeComplexity = OpaqueStuff.get("keyboardType")
|
||||
return kb
|
||||
|
||||
|
||||
@staticmethod
|
||||
def createWithDataSignBlob(blob, deviceKey=None):
|
||||
keybag = tlvToDict(blob)
|
||||
|
||||
kb = Keybag(keybag.get("DATA", ""))
|
||||
kb.deviceKey = deviceKey
|
||||
kb.KeyBagKeys = blob
|
||||
kb.unlockAlwaysAccessible()
|
||||
|
||||
if len(keybag.get("SIGN", "")):
|
||||
hmackey = AESUnwrap(deviceKey, kb.attrs["HMCK"])
|
||||
#hmac key and data are swapped (on purpose or by mistake ?)
|
||||
sigcheck = hmac.new(key=keybag["DATA"], msg=hmackey, digestmod=sha1).digest()
|
||||
#fixed in ios 7
|
||||
if kb.attrs["VERS"] >= 4:
|
||||
sigcheck = hmac.new(key=hmackey, msg=keybag["DATA"], digestmod=sha1).digest()
|
||||
if sigcheck != keybag.get("SIGN", ""):
|
||||
print "Keybag: SIGN check FAIL"
|
||||
return kb
|
||||
|
||||
@staticmethod
|
||||
def createWithBackupManifest(manifest, password, deviceKey=None):
|
||||
kb = Keybag(manifest["BackupKeyBag"].data)
|
||||
kb.deviceKey = deviceKey
|
||||
if not kb.unlockBackupKeybagWithPasscode(password):
|
||||
print "Cannot decrypt backup keybag. Wrong password ?"
|
||||
return
|
||||
return kb
|
||||
|
||||
def isBackupKeybag(self):
|
||||
return self.type == BACKUP_KEYBAG
|
||||
|
||||
def parseBinaryBlob(self, data):
|
||||
currentClassKey = None
|
||||
|
||||
for tag, data in loopTLVBlocks(data):
|
||||
if len(data) == 4:
|
||||
data = struct.unpack(">L", data)[0]
|
||||
if tag == "TYPE":
|
||||
self.type = data & 0x3FFFFFFF #ignore the flags
|
||||
if self.type > 3:
|
||||
print "FAIL: keybag type > 3 : %d" % self.type
|
||||
elif tag == "UUID" and self.uuid is None:
|
||||
self.uuid = data
|
||||
elif tag == "WRAP" and self.wrap is None:
|
||||
self.wrap = data
|
||||
elif tag == "UUID":
|
||||
if currentClassKey:
|
||||
self.classKeys[currentClassKey["CLAS"]] = currentClassKey
|
||||
currentClassKey = {"UUID": data}
|
||||
elif tag in CLASSKEY_TAGS:
|
||||
currentClassKey[tag] = data
|
||||
else:
|
||||
self.attrs[tag] = data
|
||||
if currentClassKey:
|
||||
self.classKeys[currentClassKey["CLAS"]] = currentClassKey
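# Keybag DATA blob as parsed above: a flat sequence of tag/value records (see util.tlv),
# 4-byte values being big-endian integers. Global tags (VERS, TYPE, HMCK, WRAP, SALT,
# ITER, ...) come first; each subsequent UUID starts a class-key group carrying CLAS,
# WRAP, WPKY and, for Curve25519 class keys, KTYP and PBKY.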
|
||||
|
||||
def getPasscodekeyFromPasscode(self, passcode):
|
||||
if self.type == BACKUP_KEYBAG or self.type == OTA_KEYBAG:
|
||||
return PBKDF2(passcode, self.attrs["SALT"], iterations=self.attrs["ITER"]).read(32)
|
||||
else:
|
||||
#Warning, need to run derivation on device with this result
|
||||
return PBKDF2(passcode, self.attrs["SALT"], iterations=1).read(32)
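# For backup/OTA keybags the passcode key can thus be computed entirely off-device,
# e.g. (illustrative):
#
#   passcode_key = PBKDF2(passcode, kb.attrs["SALT"], iterations=kb.attrs["ITER"]).read(32)
#
# For other keybag types the single PBKDF2 iteration is only the first step: the result
# still has to be derived on the device itself, as the warning above notes.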
|
||||
|
||||
def unlockBackupKeybagWithPasscode(self, passcode):
|
||||
if self.type != BACKUP_KEYBAG and self.type != OTA_KEYBAG:
|
||||
print "unlockBackupKeybagWithPasscode: not a backup keybag"
|
||||
return False
|
||||
return self.unlockWithPasscodeKey(self.getPasscodekeyFromPasscode(passcode))
|
||||
|
||||
def unlockAlwaysAccessible(self):
|
||||
for classkey in self.classKeys.values():
|
||||
k = classkey["WPKY"]
|
||||
if classkey["WRAP"] == WRAP_DEVICE:
|
||||
if not self.deviceKey:
|
||||
continue
|
||||
k = AESdecryptCBC(k, self.deviceKey)
|
||||
classkey["KEY"] = k
|
||||
return True
|
||||
|
||||
def unlockWithPasscodeKey(self, passcodekey):
|
||||
if self.type != BACKUP_KEYBAG and self.type != OTA_KEYBAG:
|
||||
if not self.deviceKey:
|
||||
print "ERROR, need device key to unlock keybag"
|
||||
return False
|
||||
|
||||
for classkey in self.classKeys.values():
|
||||
if not classkey.has_key("WPKY"):
|
||||
continue
|
||||
k = classkey["WPKY"]
|
||||
if classkey["WRAP"] & WRAP_PASSCODE:
|
||||
k = AESUnwrap(passcodekey, classkey["WPKY"])
|
||||
if not k:
|
||||
return False
|
||||
if classkey["WRAP"] & WRAP_DEVICE:
|
||||
if not self.deviceKey:
|
||||
continue
|
||||
k = AESdecryptCBC(k, self.deviceKey)
|
||||
classkey["KEY"] = k
|
||||
self.unlocked = True
|
||||
return True
|
||||
|
||||
def unwrapCurve25519(self, persistent_class, persistent_key):
|
||||
assert len(persistent_key) == 0x48
|
||||
#assert persistent_class == 2 #NSFileProtectionCompleteUnlessOpen
|
||||
mysecret = self.classKeys[persistent_class]["KEY"]
|
||||
mypublic = self.classKeys[persistent_class]["PBKY"]
|
||||
hispublic = persistent_key[:32]
|
||||
shared = curve25519(mysecret, hispublic)
|
||||
md = sha256('\x00\x00\x00\x01' + shared + hispublic + mypublic).digest()
|
||||
return AESUnwrap(md, persistent_key[32:])
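# Curve25519 unwrap as implemented above: persistent_key is
#   ephemeral public key (32 bytes) || AES-wrapped key (40 bytes)
# and the unwrapping key is
#   SHA-256("\x00\x00\x00\x01" + curve25519(class_private, ephemeral_public) + ephemeral_public + class_public)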
|
||||
|
||||
def unwrapKeyForClass(self, clas, persistent_key, printError=True):
|
||||
if not self.classKeys.has_key(clas) or not self.classKeys[clas].has_key("KEY"):
|
||||
if printError: print "Keybag key %d missing or locked" % clas
|
||||
return ""
|
||||
ck = self.classKeys[clas]["KEY"]
|
||||
#if self.attrs.get("VERS", 2) >= 3 and clas == 2:
|
||||
if self.attrs.get("VERS", 2) >= 3 and self.classKeys[clas].get("KTYP", 0) == 1:
|
||||
return self.unwrapCurve25519(clas, persistent_key)
|
||||
if len(persistent_key) == 0x28:
|
||||
return AESUnwrap(ck, persistent_key)
|
||||
return
|
||||
|
||||
def wrapKeyForClass(self, clas, persistent_key):
|
||||
if not self.classKeys.has_key(clas) or not self.classKeys[clas].has_key("KEY"):
|
||||
print "Keybag key %d missing or locked" % clas
|
||||
return ""
|
||||
ck = self.classKeys[clas]["KEY"]
|
||||
return AESwrap(ck, persistent_key)
|
||||
|
||||
def printClassKeys(self):
|
||||
print "Keybag type : %s keybag (%d)" % (KEYBAG_TYPES[self.type], self.type)
|
||||
print "Keybag version : %d" % self.attrs["VERS"]
|
||||
print "Keybag UUID : %s" % self.uuid.encode("hex")
|
||||
print "-"*128
|
||||
print "".join(["Class".ljust(53),
|
||||
"WRAP".ljust(5),
|
||||
"Type".ljust(11),
|
||||
"Key".ljust(65),
|
||||
"Public key"])
|
||||
print "-"*128
|
||||
for k, ck in self.classKeys.items():
|
||||
if k == 6: print ""
|
||||
print "".join([PROTECTION_CLASSES.get(k).ljust(53),
|
||||
str(ck.get("WRAP","")).ljust(5),
|
||||
KEY_TYPES[ck.get("KTYP",0)].ljust(11),
|
||||
ck.get("KEY", "").encode("hex").ljust(65),
|
||||
ck.get("PBKY", "").encode("hex")])
|
||||
print ""
|
||||
|
||||
def getClearClassKeysDict(self):
|
||||
if self.unlocked:
|
||||
d = {}
|
||||
for ck in self.classKeys.values():
|
||||
d["%d" % ck["CLAS"]] = ck.get("KEY","").encode("hex")
|
||||
return d
|
@ -0,0 +1,381 @@
|
||||
from crypto.aes import AESdecryptCBC, AESencryptCBC
|
||||
from hfs.emf import cprotect_xattr, EMFVolume
|
||||
from hfs.hfs import HFSVolume, hfs_date, HFSFile
|
||||
from hfs.journal import carveBtreeNode, isDecryptedCorrectly
|
||||
from hfs.structs import *
|
||||
from util import sizeof_fmt, makedirs, hexdump
|
||||
import hashlib
|
||||
import os
|
||||
import struct
|
||||
|
||||
class NANDCarver(object):
|
||||
def __init__(self, volume, image, outputdir=None):
|
||||
self.volume = volume
|
||||
self.image = image
|
||||
self.nand = image
|
||||
self.ftlhax = False
|
||||
self.userblocks = None
|
||||
self.lpnToVpn = None
|
||||
self.files = {}
|
||||
self.keys = {}
|
||||
self.encrypted = image.encrypted and hasattr(volume, "emfkey")
|
||||
self.encrypted = hasattr(volume, "cp_root") and volume.cp_root != None
|
||||
if outputdir == None:
|
||||
if image.filename != "remote": outputdir = os.path.join(os.path.dirname(image.filename), "undelete")
|
||||
else: outputdir = os.path.join(".", "undelete")
|
||||
print "Carver output %s" % outputdir
|
||||
self.outputdir = outputdir
|
||||
self.okfiles = 0
|
||||
self.first_lba = self.volume.bdev.lbaoffset
|
||||
self.pageSize = image.pageSize
|
||||
self.blankPage = "\xDE\xAD\xBE\xEF" * (self.pageSize/4)
|
||||
self.emfkey = None
|
||||
self.fileIds = None
|
||||
self.fastMode = False
|
||||
if hasattr(volume, "emfkey"):
|
||||
self.emfkey = volume.emfkey
|
||||
|
||||
def carveFile(self, hfsfile, callback, lbas=None, filter_=None):
|
||||
for e in hfsfile.extents:
|
||||
if e.blockCount == 0:
|
||||
break
|
||||
for i in xrange(e.startBlock, e.startBlock+e.blockCount):
|
||||
if lbas and not i in lbas:
|
||||
continue
|
||||
|
||||
if self.fastMode:
|
||||
for vpn in self.ftlhax.get(self.first_lba+i, []):
|
||||
usn = 0
|
||||
s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+i)
|
||||
callback(d, usn, filter_)
|
||||
else:
|
||||
# s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+i)
|
||||
# callback(d, 0)
|
||||
usnsForLbn = self.ftlhax.get(self.first_lba+i, [])
|
||||
for usn in sorted(usnsForLbn.keys())[:-1]:
|
||||
for vpn in usnsForLbn[usn]:
|
||||
s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+i)
|
||||
callback(d, usn, filter_)
|
||||
|
||||
def _catalogFileCallback(self, data, usn, filter_):
|
||||
for k,v in carveBtreeNode(data,HFSPlusCatalogKey, HFSPlusCatalogData):
|
||||
if v.recordType != kHFSPlusFileRecord:
|
||||
continue
|
||||
if filter_ and not filter_(k,v):
|
||||
continue
|
||||
name = getString(k)
|
||||
#if not self.filterFileName(name):
|
||||
# continue
|
||||
h = hashlib.sha1(HFSPlusCatalogKey.build(k)).digest()
|
||||
if self.files.has_key(h):
|
||||
continue
|
||||
if not self.fileIds.has_key(v.data.fileID):
|
||||
print "Found deleted file record", v.data.fileID, name.encode("utf-8"), "created", hfs_date(v.data.createDate)
|
||||
self.files[h] = (name,v, usn)
|
||||
|
||||
def _attributesFileCallback(self, data, usn, filter_):
|
||||
for k,v in carveBtreeNode(data,HFSPlusAttrKey, HFSPlusAttrData):
|
||||
if getString(k) != "com.apple.system.cprotect":
|
||||
continue
|
||||
if self.fileIds.has_key(k.fileID):
|
||||
continue
|
||||
filekeys = self.keys.setdefault(k.fileID, [])
|
||||
try:
|
||||
cprotect = cprotect_xattr.parse(v.data)
|
||||
except:
|
||||
continue
|
||||
if cprotect.key_size == 0:
|
||||
continue
|
||||
filekey = self.volume.keybag.unwrapKeyForClass(cprotect.persistent_class, cprotect.persistent_key, False)
|
||||
if filekey and not filekey in filekeys:
|
||||
#print "Found key for file ID ", k.fileID
|
||||
filekeys.append(filekey)
|
||||
|
||||
def carveCatalog(self, lbas=None, filter_=None):
|
||||
return self.carveFile(self.volume.catalogFile, self._catalogFileCallback, lbas, filter_)
|
||||
|
||||
def carveKeys(self, lbas=None):
|
||||
return self.carveFile(self.volume.xattrFile, self._attributesFileCallback, lbas)
|
||||
|
||||
def pagesForLBN(self, lbn):
|
||||
return self.ftlhax.get(self.first_lba + lbn, {})
|
||||
|
||||
def decryptFileBlock(self, pn, filekey, lbn, decrypt_offset):
|
||||
s, ciphertext = self.nand.ftl.YAFTL_readPage(pn, None, lbn)
|
||||
if not self.encrypted:
|
||||
return ciphertext
|
||||
if not self.image.isIOS5():
|
||||
return AESdecryptCBC(ciphertext, filekey, self.volume.ivForLBA(lbn))
|
||||
clear = ""
|
||||
ivkey = hashlib.sha1(filekey).digest()[:16]
|
||||
for i in xrange(len(ciphertext)/0x1000):
|
||||
iv = self.volume.ivForLBA(decrypt_offset, False)
|
||||
iv = AESencryptCBC(iv, ivkey)
|
||||
clear += AESdecryptCBC(ciphertext[i*0x1000:(i+1)*0x1000], filekey, iv)
|
||||
decrypt_offset += 0x1000
|
||||
return clear
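# Data decryption as done above: before iOS 5 the whole block is AES-CBC decrypted with the
# file key and an IV derived from its LBA; on iOS 5 every 4096-byte chunk gets its own IV,
# iv = AESencryptCBC(ivForLBA(file_offset, False), SHA1(filekey)[:16]), with file_offset
# advancing by 0x1000 per chunk (decrypt_offset above).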
|
||||
|
||||
def writeUndeletedFile(self, filename, data):
|
||||
knownExtensions = (".m4a", ".plist",".sqlite",".sqlitedb", ".jpeg", ".jpg", ".png", ".db",".json",".xml",".sql")
|
||||
#windows invalid chars \/:*?"<>|
|
||||
filename = str(filename.encode("utf-8")).translate(None, "\\/:*?\"<>|,")
|
||||
folder = self.outputdir
|
||||
if self.outputdir == "./":
|
||||
folder = folder + "/undelete"
|
||||
elif filename.lower().endswith(knownExtensions):
|
||||
ext = filename[filename.rfind(".")+1:]
|
||||
folder = folder + "/" + ext.lower()
|
||||
makedirs(folder)
|
||||
open(folder + "/" + filename, "wb").write(data)
|
||||
|
||||
def getFileAtUSN(self, filename, filerecord, filekey, usn, previousVersion=None, exactSize=True):
|
||||
missing_pages = 0
|
||||
decrypt_offset = 0
|
||||
file_pages = []
|
||||
logicalSize = filerecord.dataFork.logicalSize
|
||||
for extent in self.volume.getAllExtents(filerecord.dataFork, filerecord.fileID):
|
||||
for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
|
||||
pn = self.pagesForLBN(bn).get(usn) #fail
|
||||
if pn:
|
||||
clear = self.decryptFileBlock(pn[-1], filekey, bn, decrypt_offset)
|
||||
file_pages.append(clear)
|
||||
elif previousVersion:
|
||||
file_pages.append(previousVersion[len(file_pages)])
|
||||
else:
|
||||
file_pages.append(self.blankPage)
|
||||
missing_pages += 1
|
||||
decrypt_offset += self.pageSize
|
||||
|
||||
print "Recovered %d:%s %d missing pages, size %s, created %s, contentModDate %s" % \
|
||||
(filerecord.fileID, filename.encode("utf-8"), missing_pages, sizeof_fmt(logicalSize), hfs_date(filerecord.createDate), hfs_date(filerecord.contentModDate))
|
||||
filename = "%d_%d_%s" % (filerecord.fileID, usn, filename)
|
||||
if missing_pages == 0:
|
||||
filename = "OK_" + filename
|
||||
self.okfiles += 1
|
||||
data = "".join(file_pages)
|
||||
if exactSize:
|
||||
data = data[:logicalSize]
|
||||
self.writeUndeletedFile(filename, data)
|
||||
return file_pages
|
||||
|
||||
#test for SQLite
|
||||
def rollbackExistingFile(self, filename):
|
||||
filerecord = self.volume.getFileRecordForPath(filename)
|
||||
filekey = self.volume.getFileKeyForFileId(filerecord.fileID)
|
||||
print "filekey", filekey.encode("hex")
|
||||
z = None
|
||||
for extent in filerecord.dataFork.HFSPlusExtentDescriptor:
|
||||
for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
|
||||
pages = self.pagesForLBN(bn)
|
||||
print pages
|
||||
for usn in sorted(pages.keys()):
|
||||
d = self.decryptFileBlock(pages[usn][-1], filekey, bn, 0)
|
||||
if d.startswith("SQL") or True:
|
||||
filechangecounter = struct.unpack(">L", d[24:28])
|
||||
print usn, "OK", filechangecounter
|
||||
z = self.getFileAtUSN(os.path.basename(filename), filerecord, filekey, usn, z)
|
||||
else:
|
||||
print usn, "FAIL"
|
||||
return
|
||||
|
||||
def filterFileName(self, filename):
|
||||
return filename.lower().endswith(".jpg")
|
||||
|
||||
def getExistingFileIDs(self):
|
||||
print "Collecting existing file ids"
|
||||
self.fileIds = self.volume.listAllFileIds()
|
||||
print "%d file IDs" % len(self.fileIds.keys())
|
||||
|
||||
def carveDeletedFiles_fast(self, catalogLBAs=None, filter_=None):
|
||||
self.fastMode = True
|
||||
if not self.ftlhax:
|
||||
hax, userblocks = self.nand.ftl.YAFTL_lookup1()
|
||||
self.ftlhax = hax
|
||||
self.userblocks = userblocks
|
||||
|
||||
self.files = {}
|
||||
if not self.fileIds:
|
||||
self.getExistingFileIDs()
|
||||
print "Carving catalog file"
|
||||
#catalogLBAs = None
|
||||
self.carveCatalog(catalogLBAs, filter_)
|
||||
|
||||
keysLbas = []
|
||||
for name, vv, usn in self.files.values():
|
||||
for i in xrange(vv.data.fileID, vv.data.fileID + 100):
|
||||
if self.volume.xattrTree.search((i, "com.apple.system.cprotect")):
|
||||
keysLbas.extend(self.volume.xattrTree.getLBAsHax())
|
||||
break
|
||||
|
||||
#print "keysLbas", keysLbas
|
||||
if self.encrypted and len(self.keys) == 0:
|
||||
print "Carving attribute file for file keys"
|
||||
#self.carveKeys(keysLbas)
|
||||
self.carveKeys()
|
||||
|
||||
self.okfiles = 0
|
||||
total = 0
|
||||
print "%d files, %d keys" % (len(self.files), len(self.keys))
|
||||
for name, vv, usn in self.files.values():
|
||||
if not self.keys.has_key(vv.data.fileID):
|
||||
print "No file key for %s" % name.encode("utf-8")
|
||||
keys = set(self.keys.get(vv.data.fileID, [self.emfkey]))
|
||||
print "%s" % (name.encode("utf-8"))
|
||||
if self.readFileHax(name, vv.data, keys):
|
||||
total += 1
|
||||
|
||||
print "Carving done, recovered %d deleted files, %d are most likely OK" % (total, self.okfiles)
|
||||
|
||||
def carveDeleteFiles_slow(self, catalogLBAs=None, filter_=None):
|
||||
self.fastMode = False
|
||||
self.files = {}
|
||||
if not self.ftlhax:
|
||||
self.ftlhax = self.nand.ftl.YAFTL_hax2()
|
||||
if not self.fileIds:
|
||||
self.getExistingFileIDs()
|
||||
if self.encrypted and len(self.keys) == 0:
|
||||
print "Carving attribute file for file keys"
|
||||
self.carveKeys()
|
||||
print "Carving catalog file"
|
||||
self.carveCatalog(catalogLBAs, filter_)
|
||||
|
||||
self.okfiles = 0
|
||||
total = 0
|
||||
print "%d files" % len(self.files)
|
||||
for name, vv, usn in self.files.values():
|
||||
keys = set(self.keys.get(vv.data.fileID, [self.emfkey]))
|
||||
print "%s num keys = %d" % (name, len(keys))
|
||||
good_usn = 0
|
||||
for filekey in keys:
|
||||
if good_usn:
|
||||
break
|
||||
first_block = vv.data.dataFork.HFSPlusExtentDescriptor[0].startBlock
|
||||
for usn, pn in self.pagesForLBN(first_block).items():
|
||||
d = self.decryptFileBlock(pn[-1], filekey, self.first_lba+first_block, 0)
|
||||
if isDecryptedCorrectly(d):
|
||||
#print "USN for first block : ", usn
|
||||
good_usn = usn
|
||||
break
|
||||
if good_usn == 0:
|
||||
continue
|
||||
self.getFileAtUSN(name, vv.data, filekey, good_usn)
|
||||
|
||||
def getBBTOC(self, block):
|
||||
btoc = self.nand.ftl.readBTOCPages(block, self.nand.ftl.totalPages)
|
||||
if not btoc:
|
||||
return self.nand.ftl.block_lpn_to_vpn(block)
|
||||
bbtoc = {}
|
||||
for i in xrange(len(btoc)):
|
||||
bbtoc[btoc[i]] = block*self.nand.ftl.vfl.pages_per_sublk + i
|
||||
return bbtoc
|
||||
|
||||
def readFileHax(self, filename, filerecord, filekeys):
|
||||
lba0 = self.first_lba + filerecord.dataFork.HFSPlusExtentDescriptor[0].startBlock
|
||||
filekey = None
|
||||
good_usn = None
|
||||
first_vpn = 0
|
||||
first_usn = 0
|
||||
hax = self.ftlhax
|
||||
print "%d versions for first lba" % len(hax.get(lba0, []))
|
||||
for k in filekeys:
|
||||
for vpn in hax.get(lba0, []):
|
||||
s, ciphertext = self.nand.ftl.YAFTL_readPage(vpn, key=None, lpn=None)
|
||||
if not ciphertext:
|
||||
continue
|
||||
d = self.decryptFileBlock2(ciphertext, k, lba0, 0)
|
||||
#hexdump(d[:16])
|
||||
if isDecryptedCorrectly(d):
|
||||
filekey = k
|
||||
first_vpn = vpn
|
||||
first_usn = good_usn = s.usn
|
||||
block = vpn / self.nand.ftl.vfl.pages_per_sublk
|
||||
break
|
||||
if not filekey:
|
||||
return False
|
||||
logicalSize = filerecord.dataFork.logicalSize
|
||||
missing_pages = 0
|
||||
file_pages = []
|
||||
lbns = []
|
||||
for extent in self.volume.getAllExtents(filerecord.dataFork, filerecord.fileID):
|
||||
for bn in xrange(extent.startBlock, extent.startBlock + extent.blockCount):
|
||||
lbns.append(self.first_lba + bn)
|
||||
datas = {}
|
||||
usnblocksToLookAT = sorted(filter(lambda x: x >= good_usn, self.userblocks.keys()))[:5]
|
||||
print usnblocksToLookAT
|
||||
usnblocksToLookAT.insert(0, 0)
|
||||
first_block = True
|
||||
done = False
|
||||
for usn in usnblocksToLookAT:
|
||||
if first_block:
|
||||
bbtoc = self.getBBTOC(block)
|
||||
first_block = False
|
||||
else:
|
||||
bbtoc = self.getBBTOC(self.userblocks[usn])
|
||||
for lbn in bbtoc.keys():
|
||||
if not lbn in lbns:
|
||||
continue
|
||||
idx = lbns.index(lbn)
|
||||
s, ciphertext = self.nand.ftl.YAFTL_readPage(bbtoc[lbn], key=None, lpn=None)
|
||||
if not ciphertext:
|
||||
continue
|
||||
ciphertext = self.decryptFileBlock2(ciphertext, filekey, lbn, idx*self.pageSize)
|
||||
if idx == 0:
|
||||
if not isDecryptedCorrectly(ciphertext):
|
||||
continue
|
||||
datas[idx*self.pageSize] = (ciphertext, lbn - self.first_lba)
|
||||
#if idx == len(lbns):
|
||||
if len(datas) == len(lbns):
|
||||
done=True
|
||||
break
|
||||
|
||||
if done:
|
||||
break
|
||||
cleartext = ""
|
||||
decrypt_offset = 0
|
||||
for i in xrange(0,logicalSize, self.pageSize):
|
||||
if datas.has_key(i):
|
||||
ciphertext, lbn = datas[i]
|
||||
cleartext += ciphertext
|
||||
else:
|
||||
cleartext += self.blankPage
|
||||
missing_pages += 1
|
||||
decrypt_offset += self.pageSize
|
||||
|
||||
print "Recovered %d:%s %d missing pages, size %s, created %s, contentModDate %s" % \
|
||||
(filerecord.fileID, filename.encode("utf-8"), missing_pages, sizeof_fmt(logicalSize), hfs_date(filerecord.createDate), hfs_date(filerecord.contentModDate))
|
||||
filename = "%d_%d_%s" % (filerecord.fileID, first_usn, filename)
|
||||
if missing_pages == 0:
|
||||
filename = "OK_" + filename
|
||||
self.okfiles += 1
|
||||
if True:#exactSize:
|
||||
cleartext = cleartext[:logicalSize]
|
||||
self.writeUndeletedFile(filename, cleartext)
|
||||
return True
|
||||
|
||||
def decryptFileBlock2(self, ciphertext, filekey, lbn, decrypt_offset):
|
||||
if not self.encrypted:
|
||||
return ciphertext
|
||||
if not self.image.isIOS5():
|
||||
return AESdecryptCBC(ciphertext, filekey, self.volume.ivForLBA(lbn, add=False))
|
||||
clear = ""
|
||||
ivkey = hashlib.sha1(filekey).digest()[:16]
|
||||
for i in xrange(len(ciphertext)/0x1000):
|
||||
iv = self.volume.ivForLBA(decrypt_offset, False)
|
||||
iv = AESencryptCBC(iv, ivkey)
|
||||
clear += AESdecryptCBC(ciphertext[i*0x1000:(i+1)*0x1000], filekey, iv)
|
||||
decrypt_offset += 0x1000
|
||||
return clear
|
||||
|
||||
def getFileRanges(self, hfsfile):
|
||||
res = []
|
||||
for e in hfsfile.extents:
|
||||
if e.blockCount == 0:
|
||||
break
|
||||
res.append(xrange(e.startBlock, e.startBlock+e.blockCount))
|
||||
return res
|
||||
|
||||
def readFSPage(self, vpn, lba):
|
||||
s,d = self.nand.ftl.YAFTL_readPage(vpn, self.emfkey, self.first_lba+lba)
|
||||
if s:
|
||||
return d
|
||||
|
@ -0,0 +1,86 @@
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
|
||||
"""
|
||||
row-by-row dump
|
||||
page = data + spare metadata + iokit return code + iokit return code 2
|
||||
"""
|
||||
class NANDImageFlat(object):
|
||||
def __init__(self, filename, geometry):
|
||||
flags = os.O_RDONLY
|
||||
if sys.platform == "win32":
|
||||
flags |= os.O_BINARY
|
||||
self.fd = os.open(filename, flags)
|
||||
self.nCEs = geometry["#ce"]
|
||||
self.pageSize = geometry["#page-bytes"]
|
||||
self.metaSize = geometry.get("meta-per-logical-page", 12)
|
||||
self.dumpedPageSize = geometry.get("dumpedPageSize", self.pageSize + self.metaSize + 8)
|
||||
self.hasIOKitStatus = True
|
||||
if self.dumpedPageSize == self.pageSize + geometry["#spare-bytes"] or self.dumpedPageSize == self.pageSize + self.metaSize:
|
||||
self.hasIOKitStatus = False
|
||||
self.blankPage = "\xFF" * self.pageSize
|
||||
self.blankSpare = "\xFF" * self.metaSize
|
||||
self.imageSize = os.path.getsize(filename)
|
||||
expectedSize = geometry["#ce"] * geometry["#ce-blocks"] * geometry["#block-pages"] * self.dumpedPageSize
|
||||
if self.imageSize < expectedSize:
|
||||
raise Exception("Error: image appears to be truncated, expected size=%d" % expectedSize)
|
||||
print "Image size matches expected size, looks ok"
|
||||
|
||||
def _readPage(self, ce, page):
|
||||
i = page * self.nCEs + ce
|
||||
off = i * self.dumpedPageSize
|
||||
os.lseek(self.fd, off, os.SEEK_SET)
|
||||
return os.read(self.fd, self.dumpedPageSize)
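# Dump layout assumed here: pages are interleaved across CEs, so page p of CE c sits at
# offset (p * nCEs + c) * dumpedPageSize, each record holding data + spare metadata
# (+ 8 bytes of IOKit return codes when hasIOKitStatus is set).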
|
||||
|
||||
def readPage(self, ce, page):
|
||||
d = self._readPage(ce, page)
|
||||
if not d or len(d) != self.dumpedPageSize:
|
||||
return None,None
|
||||
if self.hasIOKitStatus:
|
||||
r1,r2 = struct.unpack("<LL", d[self.pageSize+self.metaSize:self.pageSize+self.metaSize+8])
|
||||
if r1 != 0x0:
|
||||
return None, None
|
||||
data = d[:self.pageSize]
|
||||
spare = d[self.pageSize:self.pageSize+self.metaSize]
|
||||
if not self.hasIOKitStatus and data == self.blankPage and spare == self.blankSpare:
|
||||
return None,None
|
||||
return spare, data
|
||||
|
||||
"""
|
||||
iEmu NAND format
|
||||
one file for each CE, start with chip id (8 bytes) then pages
|
||||
page = non-empty flag (1 byte) + data + spare metadata (12 bytes)
|
||||
"""
|
||||
class NANDImageSplitCEs(object):
|
||||
def __init__(self, folder, geometry):
|
||||
flags = os.O_RDONLY
|
||||
if sys.platform == "win32":
|
||||
flags |= os.O_BINARY
|
||||
self.fds = []
|
||||
self.nCEs = geometry["#ce"]
|
||||
self.pageSize = geometry["#page-bytes"]
|
||||
self.metaSize = 12
|
||||
self.npages = 0
|
||||
self.dumpedPageSize = 1 + self.pageSize + self.metaSize
|
||||
for i in xrange(self.nCEs):
|
||||
fd = os.open(folder + "/ce_%d.bin" % i, flags)
|
||||
self.fds.append(fd)
|
||||
self.npages += os.fstat(fd).st_size / self.dumpedPageSize
|
||||
|
||||
def _readPage(self, ce, page):
|
||||
fd = self.fds[ce]
|
||||
off = 8 + page * self.dumpedPageSize #skip chip id
|
||||
os.lseek(fd, off, os.SEEK_SET)
|
||||
return os.read(fd, self.dumpedPageSize)
|
||||
|
||||
def readPage(self, ce, page):
|
||||
d = self._readPage(ce, page)
|
||||
if not d or len(d) != self.dumpedPageSize:
|
||||
return None,None
|
||||
if d[0] != '1' and d[0] != '\x01':
|
||||
return None,None
|
||||
data = d[1:1+self.pageSize]
|
||||
spare = d[1+self.pageSize:1+self.pageSize+self.metaSize]
|
||||
return spare, data
|
||||
|
@ -0,0 +1,220 @@
|
||||
from carver import NANDCarver
|
||||
from construct.core import Struct
|
||||
from construct.macros import ULInt32, ULInt16, Array, ULInt8, Padding
|
||||
from pprint import pprint
|
||||
from structs import SpareData
|
||||
from util import hexdump
|
||||
from vfl import VFL
|
||||
import plistlib
|
||||
|
||||
"""
|
||||
openiboot/plat-s5l8900/ftl.c
|
||||
openiboot/plat-s5l8900/includes/s5l8900/ftl.h
|
||||
"""
|
||||
FTLCxtLog = Struct("FTLCxtLog",
|
||||
ULInt32("usn"),
|
||||
ULInt16("wVbn"),
|
||||
ULInt16("wLbn"),
|
||||
ULInt32("wPageOffsets"),
|
||||
ULInt16("pagesUsed"),
|
||||
ULInt16("pagesCurrent"),
|
||||
ULInt32("isSequential")
|
||||
)
|
||||
|
||||
FTLCxtElement2 = Struct("FTLCxtElement2",
|
||||
ULInt16("field_0"),
|
||||
ULInt16("field_2")
|
||||
)
|
||||
|
||||
FTLCxt = Struct("FTLCxt",
|
||||
ULInt32("usnDec"),
|
||||
ULInt32("nextblockusn"),
|
||||
ULInt16("wNumOfFreeVb"),
|
||||
ULInt16("nextFreeIdx"),
|
||||
ULInt16("swapCounter"),
|
||||
Array(20, ULInt16("awFreeVb")),
|
||||
ULInt16("field_36"),
|
||||
Array(18, ULInt32("pages_for_pawMapTable")),
|
||||
Array(36, ULInt32("pages_for_pawEraseCounterTable")),
|
||||
Array(34, ULInt32("pages_for_wPageOffsets")),
|
||||
ULInt32("pawMapTable"),
|
||||
ULInt32("pawEraseCounterTable"),
|
||||
ULInt32("wPageOffsets"),
|
||||
Array(18, FTLCxtLog),
|
||||
ULInt32("eraseCounterPagesDirty"),
|
||||
ULInt16("unk3"),
|
||||
Array(3, ULInt16("FTLCtrlBlock")),
|
||||
ULInt32("FTLCtrlPage"),
|
||||
ULInt32("clean"),
|
||||
Array(36, ULInt32("pages_for_pawReadCounterTable")),
|
||||
ULInt32("pawReadCounterTable"),
|
||||
Array(5, FTLCxtElement2),
|
||||
ULInt32("field_3C8"),
|
||||
ULInt32("totalReadCount"),
|
||||
ULInt32("page_for_FTLCountsTable"),
|
||||
ULInt32("hasFTLCountsTable"),
|
||||
Padding(0x420), #, ULInt8("field_3D8")),
|
||||
ULInt32("versionLower"),
|
||||
ULInt32("versionUpper")
|
||||
)
|
||||
|
||||
FTL_CTX_TYPE = 0x43
|
||||
FTL_BLOCK_MAP = 0x44
|
||||
FTL_ERASE_COUNTER = 0x46
|
||||
FTL_MOUNTED = 0x47
|
||||
FTL_CTX_TYPE_MAX = 0x4F
|
||||
USER_TYPE = 0x40
|
||||
USER_LAST_TYPE = 0x41 #last user page in superblock?
|
||||
|
||||
class FTL(object):
|
||||
def __init__(self, nand, vfl):
|
||||
self.nand = nand
|
||||
self.vfl = vfl
|
||||
self.pawMapTable = {} #maps logical blocks to virtual blocks
|
||||
self.pLogs = {}
|
||||
if not self.FTL_open():
|
||||
self.FTL_restore()
|
||||
|
||||
def FTL_open(self):
|
||||
minUsnDec = 0xffffffff
|
||||
ftlCtrlBlock = 0xffff
|
||||
for vb in self.vfl.VFL_get_FTLCtrlBlock():
|
||||
s, d = self.vfl.read_single_page(vb * self.vfl.pages_per_sublk)
|
||||
if not s:
|
||||
continue
|
||||
if s.type >= FTL_CTX_TYPE and s.type <= FTL_CTX_TYPE_MAX:
|
||||
if s.usn < minUsnDec:
|
||||
ftlCtrlBlock = vb
|
||||
minUsnDec = s.usn
|
||||
|
||||
print ftlCtrlBlock
|
||||
self.ftlCtrlBlock = ftlCtrlBlock
|
||||
for p in xrange(self.vfl.pages_per_sublk-1,1, -1):
|
||||
s, d = self.vfl.read_single_page(ftlCtrlBlock * self.vfl.pages_per_sublk + p)
|
||||
if not s:
|
||||
continue
|
||||
#print s
|
||||
#print p
|
||||
if s.type == FTL_CTX_TYPE:
|
||||
print s.usn
|
||||
ctx = FTLCxt.parse(d)
|
||||
if ctx.versionLower == 0x46560001:
|
||||
print ctx
|
||||
assert ctx.FTLCtrlPage == (ftlCtrlBlock * self.vfl.pages_per_sublk + p)
|
||||
break
|
||||
else:
|
||||
print "Unclean shutdown, last type 0x%x" % s.type
|
||||
return False
|
||||
self.ctx = ctx
|
||||
print "FTL_open OK !"
|
||||
return True
|
||||
|
||||
def determine_block_type(self, block):
|
||||
maxUSN = 0
|
||||
isSequential = True
|
||||
for page in xrange(self.vfl.pages_per_sublk-1,1, -1):
|
||||
s, _ = self.vfl.read_single_page(block * self.vfl.pages_per_sublk + page)
|
||||
if not s:
|
||||
continue
|
||||
if s.usn > maxUSN:
|
||||
maxUSN = s.usn
|
||||
if s.lpn % self.vfl.pages_per_sublk != page:
|
||||
isSequential = False
|
||||
return isSequential, maxUSN
|
||||
return isSequential, maxUSN
|
||||
|
||||
def FTL_restore(self):
|
||||
self.pLogs = self.vfl.nand.loadCachedData("pLogs")
|
||||
self.pawMapTable = self.vfl.nand.loadCachedData("pawMapTable")
|
||||
if self.pLogs and self.pawMapTable:
|
||||
print "Found cached FTL restore information"
|
||||
return
|
||||
self.pawMapTable = {}
|
||||
self.pLogs = {}
|
||||
ctx = None
|
||||
for p in xrange(self.vfl.pages_per_sublk-1,1, -1):
|
||||
s, d = self.vfl.read_single_page(self.ftlCtrlBlock * self.vfl.pages_per_sublk + p)
|
||||
if not s:
|
||||
continue
|
||||
if s.type == FTL_CTX_TYPE:
|
||||
print s.usn
|
||||
ctx = FTLCxt.parse(d)
|
||||
if ctx.versionLower == 0x46560001:
|
||||
print ctx
|
||||
assert ctx.FTLCtrlPage == (self.ftlCtrlBlock * self.vfl.pages_per_sublk + p)
|
||||
print "Found most recent ctx"
|
||||
break
|
||||
if not ctx:
|
||||
print "FTL_restore fail did not find ctx"
|
||||
raise Exception("FTL_restore did not find a valid FTL context")
|
||||
blockMap = {}
|
||||
self.nonSequential = {}
|
||||
print "FTL_restore in progress ..."
|
||||
for sblock in xrange(self.vfl.userSuBlksTotal + 23):
|
||||
for page in xrange(self.vfl.pages_per_sublk):
|
||||
s, d = self.vfl.read_single_page(sblock * self.vfl.pages_per_sublk + page)
|
||||
if not s:
|
||||
continue
|
||||
if s.type >= FTL_CTX_TYPE and s.type <= FTL_CTX_TYPE_MAX:
|
||||
break
|
||||
if s.type != USER_TYPE and s.type != USER_LAST_TYPE:
|
||||
print "Weird page type %x at %x %x" % (s.type, sblock, page)
|
||||
continue
|
||||
if s.lpn % self.vfl.pages_per_sublk != page:
|
||||
print "Block %d non sequential" % sblock
|
||||
self.nonSequential[sblock] = 1
|
||||
blockMap[sblock] = (s.lpn / self.vfl.pages_per_sublk, s.usn)
|
||||
break
|
||||
|
||||
z = dict([(i, [(a, blockMap[a][1]) for a in blockMap.keys() if blockMap[a][0] ==i]) for i in xrange(self.vfl.userSuBlksTotal)])
|
||||
for k,v in z.items():
|
||||
if len(v) == 2:
|
||||
print k, v
|
||||
vbA, usnA = v[0]
|
||||
vbB, usnB = v[1]
|
||||
if usnA > usnB: #smallest USN is map block, highest log block
|
||||
self.pawMapTable[k] = vbB
|
||||
self.restoreLogBlock(k, vbA)
|
||||
else:
|
||||
self.pawMapTable[k] = vbA
|
||||
self.restoreLogBlock(k, vbB)
|
||||
elif len(v) > 2:
|
||||
raise Exception("fufu", k, v)
|
||||
else:
|
||||
self.pawMapTable[k] = v[0][0]
|
||||
self.vfl.nand.cacheData("pLogs", self.pLogs)
|
||||
self.vfl.nand.cacheData("pawMapTable", self.pawMapTable)
|
||||
|
||||
def restoreLogBlock(self, lbn, vbn):
|
||||
log = {"wVbn": vbn, "wPageOffsets": {}}
|
||||
for page in xrange(self.vfl.pages_per_sublk):
|
||||
s, d = self.vfl.read_single_page(vbn * self.vfl.pages_per_sublk + page)
|
||||
if not s:
|
||||
break
|
||||
log["wPageOffsets"][s.lpn % self.vfl.pages_per_sublk] = page
|
||||
self.pLogs[lbn] = log
|
||||
|
||||
def mapPage(self, lbn, offset):
|
||||
if self.pLogs.has_key(lbn):
|
||||
if self.pLogs[lbn]["wPageOffsets"].has_key(offset):
|
||||
offset = self.pLogs[lbn]["wPageOffsets"][offset]
|
||||
#print "mapPage got log %d %d" % (lbn, offset)
|
||||
return self.pLogs[lbn]["wVbn"] * self.vfl.pages_per_sublk + offset
|
||||
if not self.pawMapTable.has_key(lbn):
|
||||
return 0xFFFFFFFF
|
||||
return self.pawMapTable[lbn] * self.vfl.pages_per_sublk + offset
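# Translation sketch: readLPN below splits a logical page into
# (lbn, offset) = (lpn / pages_per_sublk, lpn % pages_per_sublk); mapPage first tries the
# log-block remap pLogs[lbn]["wPageOffsets"], and otherwise returns
# pawMapTable[lbn] * pages_per_sublk + offset.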
|
||||
|
||||
def readLPN(self, lpn, key=None):
|
||||
lbn = lpn / self.vfl.pages_per_sublk
|
||||
offset = lpn % self.vfl.pages_per_sublk
|
||||
vpn = self.mapPage(lbn, offset)
|
||||
if vpn == 0xFFFFFFFF:
|
||||
print "lbn not found %d" % lbn
|
||||
return "\xFF" * self.nand.pageSize
|
||||
s,d = self.vfl.read_single_page(vpn, key, lpn)
|
||||
if not s:
|
||||
return None
|
||||
if s.lpn != lpn:
|
||||
raise Exception("FTL translation FAIL spare lpn=%d vs expected %d" % (s.lpn, lpn))
|
||||
return d
|
||||
|
425
dump-imessages/iphone-dataprotection/python_scripts/nand/nand.py
Normal file
425
dump-imessages/iphone-dataprotection/python_scripts/nand/nand.py
Normal file
@ -0,0 +1,425 @@
|
||||
from crypto.aes import AESdecryptCBC
|
||||
from firmware.img2 import IMG2
|
||||
from firmware.img3 import Img3, extract_img3s
|
||||
from firmware.scfg import parse_SCFG
|
||||
from hfs.emf import EMFVolume
|
||||
from hfs.hfs import HFSVolume
|
||||
from image import NANDImageSplitCEs, NANDImageFlat
|
||||
from keystore.effaceable import check_effaceable_header, EffaceableLockers
|
||||
from legacyftl import FTL
|
||||
from partition_tables import GPT_partitions, parse_lwvm, parse_mbr, parse_gpt, \
|
||||
APPLE_ENCRYPTED
|
||||
from progressbar import ProgressBar
|
||||
from remote import NANDRemote, IOFlashStorageKitClient
|
||||
from structs import *
|
||||
from util import sizeof_fmt, write_file, load_pickle, save_pickle, hexdump, \
|
||||
makedirs
|
||||
from util.bdev import FTLBlockDevice
|
||||
from vfl import VFL
|
||||
from vsvfl import VSVFL
|
||||
from yaftl import YAFTL
|
||||
import math
|
||||
import os
|
||||
import plistlib
|
||||
import struct
|
||||
|
||||
def ivForPage(page):
|
||||
iv = ""
|
||||
for _ in xrange(4):
|
||||
if (page & 1):
|
||||
page = 0x80000061 ^ (page >> 1);
|
||||
else:
|
||||
page = page >> 1;
|
||||
iv += struct.pack("<L", page)
|
||||
return iv
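# The data page IV is derived from the page number with a small LFSR-like recurrence:
# four times, x = (x >> 1) ^ 0x80000061 if x is odd else x >> 1, packing each intermediate
# value little-endian. Page 1, for example, yields 0x80000061, 0xC0000051, 0xE0000049,
# 0xF0000045.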
|
||||
|
||||
#iOS 3
|
||||
def getEMFkeyFromCRPT(data, key89B):
|
||||
assert data.startswith("tprc")
|
||||
z = AESdecryptCBC(data[4:0x44], key89B)
|
||||
assert z.startswith("TPRC"), "wrong key89B"
|
||||
#last_byte = struct.unpack("<Q", z[4:4+8])[0]
|
||||
emf = z[16:16+32]
|
||||
return emf
|
||||
|
||||
class NAND(object):
|
||||
H2FMI_HASH_TABLE = gen_h2fmi_hash_table()
|
||||
|
||||
def __init__(self, filename, device_infos, ppn=False):
|
||||
self.device_infos = device_infos
|
||||
self.partition_table = None
|
||||
self.lockers = {}
|
||||
self.iosVersion = 0
|
||||
self.hasMBR = False
|
||||
self.metadata_whitening = False
|
||||
self.filename = filename
|
||||
self.encrypted = device_infos["hwModel"] not in ["M68AP", "N45AP", "N82AP", "N72AP"]
|
||||
self.initGeometry(device_infos["nand"])
|
||||
|
||||
if os.path.basename(filename).startswith("ce_"):
|
||||
self.image = NANDImageSplitCEs(os.path.dirname(filename), device_infos["nand"])
|
||||
elif filename == "remote":
|
||||
self.image = NANDRemote(self.pageSize, self.metaSize, self.pagesPerBlock, self.bfn)
|
||||
else:
|
||||
self.image = NANDImageFlat(filename, device_infos["nand"])
|
||||
|
||||
s, page0 = self.readPage(0,0)
|
||||
self.nandonly = (page0 != None) and page0.startswith("ndrG")
|
||||
if self.nandonly:
|
||||
self.encrypted = True
|
||||
|
||||
magics = ["DEVICEINFOBBT"]
|
||||
nandsig = None
|
||||
if page0 and page0[8:14] == "Darwin":
|
||||
print "Found old style signature", page0[:8]
|
||||
nandsig = page0
|
||||
else:
|
||||
magics.append("NANDDRIVERSIGN")
|
||||
|
||||
#sp0 = {}
|
||||
sp0 = self.readSpecialPages(0, magics)
|
||||
print "Found %s special pages in CE 0" % (", ".join(sp0.keys()))
|
||||
if not self.nandonly:
|
||||
print "Device does not boot from NAND (=> has a NOR)"
|
||||
|
||||
vfltype = '1' #use VSVFL by default
|
||||
if not nandsig:
|
||||
nandsig = sp0.get("NANDDRIVERSIGN")
|
||||
if not nandsig:
|
||||
print "NANDDRIVERSIGN not found, assuming metadata withening = %d" % self.metadata_whitening
|
||||
else:
|
||||
nSig, flags = struct.unpack("<LL", nandsig[:8])
|
||||
#assert nandsig[3] == chr(0x43)
|
||||
vfltype = nandsig[1]
|
||||
self.metadata_whitening = (flags & 0x10000) != 0
|
||||
print "NAND signature 0x%x flags 0x%x withening=%d, epoch=%s" % (nSig, flags, self.metadata_whitening, nandsig[0])
|
||||
|
||||
if not self.nandonly:
|
||||
if self.device_infos.has_key("lockers"):
|
||||
self.lockers = EffaceableLockers(self.device_infos.lockers.data)
|
||||
else:
|
||||
unit = self.findLockersUnit()
|
||||
if unit:
|
||||
self.lockers = EffaceableLockers(unit[0x40:])
|
||||
self.lockers.display()
|
||||
if not self.device_infos.has_key("lockers"):
|
||||
self.device_infos.lockers = plistlib.Data(unit[0x40:0x40+960])
|
||||
EMF = self.getEMF(device_infos["key89B"].decode("hex"))
|
||||
dkey = self.getDKey(device_infos["key835"].decode("hex"))
|
||||
self.device_infos.EMF = EMF.encode("hex")
|
||||
self.device_infos.DKey = dkey.encode("hex")
|
||||
|
||||
deviceuniqueinfo = sp0.get("DEVICEUNIQUEINFO")
|
||||
if not deviceuniqueinfo:
|
||||
print "DEVICEUNIQUEINFO not found"
|
||||
else:
|
||||
scfg = parse_SCFG(deviceuniqueinfo)
|
||||
print "Found DEVICEUNIQUEINFO, serial number=%s" % scfg.get("SrNm","SrNm not found !")
|
||||
|
||||
if vfltype == '0':
|
||||
print "Using legacy VFL"
|
||||
self.vfl = VFL(self)
|
||||
self.ftl = FTL(self, self.vfl)
|
||||
elif not ppn:
|
||||
print "Using VSVFL"
|
||||
self.vfl = VSVFL(self)
|
||||
self.ftl = YAFTL(self.vfl)
|
||||
|
||||
def initGeometry(self, d):
|
||||
self.metaSize = d.get("meta-per-logical-page", 0)
|
||||
if self.metaSize == 0:
|
||||
self.metaSize = 12
|
||||
dumpedPageSize = d.get("dumpedPageSize", d["#page-bytes"] + self.metaSize + 8)
|
||||
self.dump_size= d["#ce"] * d["#ce-blocks"] * d["#block-pages"] * dumpedPageSize
|
||||
self.totalPages = d["#ce"] * d["#ce-blocks"] * d["#block-pages"]
|
||||
nand_size = d["#ce"] * d["#ce-blocks"] * d["#block-pages"] * d["#page-bytes"]
|
||||
hsize = sizeof_fmt(nand_size)
|
||||
self.bfn = d.get("boot-from-nand", False)
|
||||
self.dumpedPageSize = dumpedPageSize
|
||||
self.pageSize = d["#page-bytes"]
|
||||
self.bootloaderBytes = d.get("#bootloader-bytes", 1536)
|
||||
self.emptyBootloaderPage = "\xFF" * self.bootloaderBytes
|
||||
self.blankPage = "\xFF" * self.pageSize
|
||||
self.nCEs = d["#ce"]
|
||||
self.blocksPerCE = d["#ce-blocks"]
|
||||
self.pagesPerBlock = d["#block-pages"]
|
||||
self.pagesPerCE = self.blocksPerCE * self.pagesPerBlock
|
||||
self.vendorType = d["vendor-type"]
|
||||
self.deviceReadId = d.get("device-readid", 0)
|
||||
self.banks_per_ce_vfl = d["banks-per-ce"]
|
||||
if d.has_key("metadata-whitening"):
|
||||
self.metadata_whitening = (d["metadata-whitening"].data == "\x01\x00\x00\x00")
|
||||
if nand_chip_info.has_key(self.deviceReadId):
|
||||
self.banks_per_ce_physical = nand_chip_info.get(self.deviceReadId)[7]
|
||||
else:
|
||||
#raise Exception("Unknown deviceReadId %x" % self.deviceReadId)
|
||||
print "!!! Unknown deviceReadId %x, assuming 1 physical bank /CE, will probably fail" % self.deviceReadId
|
||||
self.banks_per_ce_physical = 1
|
||||
print "Chip id 0x%x banks per CE physical %d" % (self.deviceReadId, self.banks_per_ce_physical)
|
||||
self.blocks_per_bank = self.blocksPerCE / self.banks_per_ce_physical
|
||||
if self.blocksPerCE & (self.blocksPerCE-1) == 0:
|
||||
self.bank_address_space = self.blocks_per_bank
|
||||
self.total_block_space = self.blocksPerCE
|
||||
else:
|
||||
bank_address_space = next_power_of_two(self.blocks_per_bank)
|
||||
self.bank_address_space = bank_address_space
|
||||
self.total_block_space = ((self.banks_per_ce_physical-1)*bank_address_space) + self.blocks_per_bank
|
||||
self.bank_mask = int(math.log(self.bank_address_space * self.pagesPerBlock,2))
|
||||
print "NAND geometry : %s (%d CEs (%d physical banks/CE) of %d blocks of %d pages of %d bytes data, %d bytes metdata)" % \
|
||||
(hsize, self.nCEs, self.banks_per_ce_physical, self.blocksPerCE, self.pagesPerBlock, self.pageSize, d["meta-per-logical-page"])
|
||||
|
||||
def unwhitenMetadata(self, meta, pagenum):
|
||||
if len(meta) != 12:
|
||||
return None
|
||||
s = list(struct.unpack("<LLL", meta))
|
||||
for i in xrange(3):
|
||||
s[i] ^= NAND.H2FMI_HASH_TABLE[(i+pagenum) % len(NAND.H2FMI_HASH_TABLE)]
|
||||
return struct.pack("<LLL", s[0], s[1],s[2])
|
||||
|
||||
def readBootPage(self, ce, page):
|
||||
s,d=self.readPage(ce, page)
|
||||
if d:
|
||||
return d[:self.bootloaderBytes]
|
||||
else:
|
||||
#print "readBootPage %d %d failed" % (ce,page)
|
||||
return self.emptyBootloaderPage
|
||||
|
||||
def readMetaPage(self, ce, block, page, spareType=SpareData):
|
||||
return self.readBlockPage(ce, block, page, META_KEY, spareType=spareType)
|
||||
|
||||
def readBlockPage(self, ce, block, page, key=None, lpn=None, spareType=SpareData):
|
||||
assert page < self.pagesPerBlock
|
||||
pn = block * self.pagesPerBlock + page
|
||||
return self.readPage(ce, pn, key, lpn, spareType=spareType)
|
||||
|
||||
def translateabsPage(self, page):
|
||||
return page % self.nCEs, page/self.nCEs
|
||||
|
||||
def readAbsPage(self, page, key=None, lpn=None):
|
||||
return self.readPage(page % self.nCEs, page/self.nCEs, key, lpn)
|
||||
|
||||
def readPage(self, ce, page, key=None, lpn=None, spareType=SpareData):
|
||||
if ce > self.nCEs or page > self.pagesPerCE:
|
||||
#hax physical banking
|
||||
pass#raise Exception("CE %d Page %d out of bounds" % (ce, page))
|
||||
if self.filename != "remote": #undo banking hax
|
||||
bank = (page & ~((1 << self.bank_mask) - 1)) >> self.bank_mask
|
||||
page2 = (page & ((1 << self.bank_mask) - 1))
|
||||
page2 = bank * (self.blocks_per_bank) * self.pagesPerBlock + page2
|
||||
spare, data = self.image.readPage(ce, page2)
|
||||
else:
|
||||
spare, data = self.image.readPage(ce, page)
|
||||
if not data:
|
||||
return None,None
|
||||
if self.metadata_whitening and spare != "\x00"*12 and len(spare) == 12:
|
||||
spare = self.unwhitenMetadata(spare, page)
|
||||
spare = spareType.parse(spare)
|
||||
if key and self.encrypted:
|
||||
if lpn != None: pageNum = spare.lpn #XXX
|
||||
else: pageNum = page
|
||||
return spare, self.decryptPage(data, key, pageNum)
|
||||
return spare, data
|
||||
|
||||
def decryptPage(self, data, key, pageNum):
|
||||
return AESdecryptCBC(data, key, ivForPage(pageNum))
|
||||
|
||||
def unpackSpecialPage(self, data):
|
||||
l = struct.unpack("<L", data[0x34:0x38])[0]
|
||||
return data[0x38:0x38 + l]
|
||||
|
||||
def readSpecialPages(self, ce, magics):
|
||||
print "Searching for special pages..."
|
||||
specials = {}
|
||||
if self.nandonly:
|
||||
magics.append("DEVICEUNIQUEINFO")#, "DIAGCONTROLINFO")
|
||||
magics = map(lambda s: s.ljust(16,"\x00"), magics)
|
||||
|
||||
lowestBlock = self.blocksPerCE - (self.blocksPerCE / 100)
|
||||
for block in xrange(self.blocksPerCE - 1, lowestBlock, -1):
|
||||
if len(magics) == 0:
|
||||
break
|
||||
#hax for physical banking
|
||||
bank_offset = self.bank_address_space * (block / self.blocks_per_bank)
|
||||
for page in xrange(self.pagesPerBlock,-1,-1):
|
||||
page = (bank_offset + block % self.blocks_per_bank) * self.pagesPerBlock + page
|
||||
s, data = self.readPage(ce, page)
|
||||
if data == None:
|
||||
continue
|
||||
if data[:16] in magics:
|
||||
self.encrypted = False
|
||||
magics.remove(data[:16])
|
||||
specials[data[:16].rstrip("\x00")] = self.unpackSpecialPage(data)
|
||||
break
|
||||
data = self.decryptPage(data, META_KEY, page)
|
||||
#print data[:16]
|
||||
if data[:16] in magics:
|
||||
#print data[:16], block, page
|
||||
self.encrypted = True
|
||||
magics.remove(data[:16])
|
||||
specials[data[:16].rstrip("\x00")] = self.unpackSpecialPage(data)
|
||||
break
|
||||
return specials
|
||||
|
||||
def readLPN(self, lpn, key):
|
||||
return self.ftl.readLPN(lpn, key)
|
||||
|
||||
def readVPN(self, vpn, key=None, lpn=None):
|
||||
return self.vfl.read_single_page(vpn, key, lpn)
|
||||
|
||||
def dumpSystemPartition(self, outputfilename):
|
||||
return self.getPartitionBlockDevice(0).dumpToFile(outputfilename)
|
||||
|
||||
def dumpDataPartition(self, emf, outputfilename):
|
||||
return self.getPartitionBlockDevice(1, emf).dumpToFile(outputfilename)
|
||||
|
||||
def isIOS5(self):
|
||||
self.getPartitionTable()
|
||||
return self.iosVersion == 5
|
||||
|
||||
def getPartitionTable(self):
|
||||
if self.partition_table:
|
||||
return self.partition_table
|
||||
pt = None
|
||||
for i in xrange(10):
|
||||
d = self.readLPN(i, FILESYSTEM_KEY)
|
||||
pt = parse_mbr(d)
|
||||
if pt:
|
||||
self.hasMBR = True
|
||||
self.iosVersion = 3
|
||||
break
|
||||
gpt = parse_gpt(d)
|
||||
if gpt:
|
||||
off = gpt.partition_entries_lba - gpt.current_lba
|
||||
d = self.readLPN(i+off, FILESYSTEM_KEY)
|
||||
pt = GPT_partitions.parse(d)[:-1]
|
||||
self.iosVersion = 4
|
||||
break
|
||||
pt = parse_lwvm(d, self.pageSize)
|
||||
if pt:
|
||||
self.iosVersion = 5
|
||||
break
|
||||
self.partition_table = pt
|
||||
return pt
|
||||
|
||||
def getPartitionBlockDevice(self, partNum, key=None):
|
||||
pt = self.getPartitionTable()
|
||||
if self.hasMBR and pt[1].type == APPLE_ENCRYPTED and partNum == 1:
|
||||
data = self.readLPN(pt[1].last_lba - 1, FILESYSTEM_KEY)
|
||||
key = getEMFkeyFromCRPT(data, self.device_infos["key89B"].decode("hex"))
|
||||
if key == None:
|
||||
if partNum == 0:
|
||||
key = FILESYSTEM_KEY
|
||||
elif partNum == 1 and self.device_infos.has_key("EMF"):
|
||||
key = self.device_infos["EMF"].decode("hex")
|
||||
return FTLBlockDevice(self, pt[partNum].first_lba, pt[partNum].last_lba, key)
|
||||
|
||||
def getPartitionVolume(self, partNum, key=None):
|
||||
bdev = self.getPartitionBlockDevice(partNum, key)
|
||||
if partNum == 0:
|
||||
return HFSVolume(bdev)
|
||||
elif partNum == 1:
|
||||
self.device_infos["dataVolumeOffset"] = self.getPartitionTable()[partNum].first_lba
|
||||
return EMFVolume(bdev, self.device_infos)
|
||||
|
||||
def findLockersUnit(self):
|
||||
if not self.nandonly:
|
||||
return
|
||||
for i in xrange(96,128):
|
||||
for ce in xrange(self.nCEs):
|
||||
s, d = self.readBlockPage(ce, 1, i)
|
||||
if d and check_effaceable_header(d):
|
||||
print "Found effaceable lockers in ce %d block 1 page %d" % (ce,i)
|
||||
return d
|
||||
|
||||
def getLockers(self):
|
||||
unit = self.findLockersUnit()
|
||||
if unit:
|
||||
return unit[0x40:0x40+960]
|
||||
|
||||
def getEMF(self, k89b):
|
||||
return self.lockers.get_EMF(k89b)
|
||||
|
||||
def getDKey(self, k835):
|
||||
return self.lockers.get_DKey(k835)
|
||||
|
||||
def readBootPartition(self, block_start, block_end):
|
||||
res = ""
|
||||
for i in xrange(block_start*self.pagesPerBlock, block_end*self.pagesPerBlock):
|
||||
res += self.readBootPage(0, i)
|
||||
return res
|
||||
|
||||
def get_img3s(self):
|
||||
if not self.nandonly:
|
||||
print "IMG3s are in NOR"
|
||||
return []
|
||||
blob = self.readBootPartition(8, 16)
|
||||
hdr = IMG2.parse(blob[:0x100])
|
||||
i = hdr.images_block * hdr.block_size + hdr.images_offset
|
||||
img3s = extract_img3s(blob[i:i+hdr.images_length*hdr.block_size])
|
||||
|
||||
boot = self.readBootPartition(0, 1)
|
||||
img3s = extract_img3s(boot[0xc00:]) + img3s
|
||||
return img3s
|
||||
|
||||
def extract_img3s(self, outfolder=None):
|
||||
if not self.nandonly:
|
||||
print "IMG3s are in NOR"
|
||||
return
|
||||
if outfolder == None:
|
||||
if self.filename != "remote": outfolder = os.path.join(os.path.dirname(self.filename), "img3")
|
||||
else: outfolder = os.path.join(".", "img3")
|
||||
makedirs(outfolder)
|
||||
print "Extracting IMG3s to %s" % outfolder
|
||||
for img3 in self.get_img3s():
|
||||
#print img3.sigcheck(self.device_infos.get("key89A").decode("hex"))
|
||||
print img3.shortname
|
||||
write_file(outfolder+ "/%s.img3" % img3.shortname, img3.img3)
|
||||
kernel = self.getPartitionVolume(0).readFile("/System/Library/Caches/com.apple.kernelcaches/kernelcache",returnString=True)
|
||||
if kernel:
|
||||
print "kernel"
|
||||
write_file(outfolder + "/kernelcache.img3", kernel)
|
||||
|
||||
def extract_shsh(self, outfolder="."):
|
||||
if not self.nandonly:
|
||||
print "IMG3s are in NOR"
|
||||
return
|
||||
pass
|
||||
|
||||
def getNVRAM(self):
|
||||
if not self.nandonly:
|
||||
print "NVRAM is in NOR"
|
||||
return
|
||||
#TODO
|
||||
nvrm = self.readBootPartition(2, 8)
|
||||
|
||||
def getBoot(self):
|
||||
boot = self.readBootPartition(0, 1)
|
||||
for i in xrange(0x400, 0x600, 16):
|
||||
name = boot[i:i+4][::-1]
|
||||
block_start, block_end, flag = struct.unpack("<LLL", boot[i+4:i+16])
|
||||
if name == "none":
|
||||
break
|
||||
print name, block_start, block_end, flag
|
||||
|
||||
def cacheData(self, name, data):
|
||||
if self.filename == "remote":
|
||||
return None
|
||||
save_pickle(self.filename + "." + name, data)
|
||||
|
||||
def loadCachedData(self, name):
|
||||
try:
|
||||
if self.filename == "remote":
|
||||
return None
|
||||
return load_pickle(self.filename + "." + name)
|
||||
except:
|
||||
return None
|
||||
|
||||
def dump(self, p):
|
||||
#hax: ioflashstoragekit can only handle 1 connection

|
||||
if self.filename == "remote":
|
||||
del self.image
|
||||
ioflash = IOFlashStorageKitClient()
|
||||
ioflash.dump_nand(p)
|
||||
#restore proxy
|
||||
if self.filename == "remote":
|
||||
self.image = NANDRemote(self.pageSize, self.metaSize, self.pagesPerBlock, self.bfn)
|
@ -0,0 +1,132 @@
|
||||
from construct import *
|
||||
from zipfile import crc32
|
||||
|
||||
GPT_HFS = "005346480000aa11aa1100306543ecac".decode("hex")
|
||||
GPT_EMF = "00464d450000aa11aa1100306543ecac".decode("hex")
|
||||
|
||||
LWVM_partitionRecord = Struct("LWVM_partitionRecord",
|
||||
String("type", 16),
|
||||
String("guid", 16),
|
||||
ULInt64("begin"),
|
||||
ULInt64("end"),
|
||||
ULInt64("attribute"),
|
||||
String("name", 0x48, encoding="utf-16-le", padchar="\x00")
|
||||
)
|
||||
|
||||
LWVM_MAGIC = "6a9088cf8afd630ae351e24887e0b98b".decode("hex")
|
||||
LWVM_header = Struct("LWVM_header",
|
||||
String("type",16),
|
||||
String("guid", 16),
|
||||
ULInt64("mediaSize"),
|
||||
ULInt32("numPartitions"),
|
||||
ULInt32("crc32"),
|
||||
Padding(464),
|
||||
Array(12, LWVM_partitionRecord),
|
||||
Array(1024, ULInt16("chunks"))
|
||||
)
|
||||
|
||||
GPT_header = Struct("GPT_header",
|
||||
String("signature", 8),
|
||||
ULInt32("revision"),
|
||||
ULInt32("header_size"),
|
||||
SLInt32("crc"), #hax to match python signed crc
|
||||
ULInt32("zero"),
|
||||
ULInt64("current_lba"),
|
||||
ULInt64("backup_lba"),
|
||||
ULInt64("first_usable_lba"),
|
||||
ULInt64("last_usable_lba"),
|
||||
String("disk_guid", 16),
|
||||
ULInt64("partition_entries_lba"),
|
||||
ULInt32("num_partition_entries"),
|
||||
ULInt32("size_partition_entry"),
|
||||
ULInt32("crc_partition_entries")
|
||||
)
|
||||
|
||||
GPT_entry = Struct("GPT_entry",
|
||||
String("partition_type_guid", 16),
|
||||
String("partition_guid", 16),
|
||||
ULInt64("first_lba"),
|
||||
ULInt64("last_lba"),
|
||||
ULInt64("attributes"),
|
||||
String("name", 72, encoding="utf-16-le", padchar="\x00"),
|
||||
)
|
||||
|
||||
GPT_partitions = RepeatUntil(lambda obj, ctx: obj["partition_type_guid"] == "\x00"*16, GPT_entry)
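# GPT entries are read until the first all-zero partition type GUID, which acts as a
# terminator; the caller slices that terminator off with [:-1] after parsing.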
|
||||
|
||||
APPLE_ENCRYPTED = 0xAE
|
||||
MBR_entry = Struct("MBR_entry",
|
||||
Byte("status"),
|
||||
Bytes("chs_start",3),
|
||||
Byte("type"),
|
||||
Bytes("chs_last",3),
|
||||
ULInt32("lba_start"),
|
||||
ULInt32("num_sectors")
|
||||
)
|
||||
|
||||
MBR = Struct("MBR",
|
||||
String("code",440),
|
||||
ULInt32("signature"),
|
||||
ULInt16("zero"),
|
||||
Array(4, MBR_entry),
|
||||
OneOf(ULInt16("magic"), [0xAA55])
|
||||
)
|
||||
|
||||
def parse_mbr(data):
|
||||
try:
|
||||
mbr = MBR.parse(data)
|
||||
if mbr.MBR_entry[0].type == 0xEE:
|
||||
print "Found protective MBR"
|
||||
return None
|
||||
res = mbr.MBR_entry[:2]
|
||||
for p in res:
|
||||
p.first_lba = p.lba_start
|
||||
p.last_lba = p.lba_start + p.num_sectors
|
||||
return res
|
||||
except:
|
||||
return None
|
||||
|
||||
def parse_gpt(data):
|
||||
gpt = GPT_header.parse(data)
|
||||
if gpt.signature != "EFI PART":
|
||||
return None
|
||||
print "Found GPT header current_lba=%d partition_entries_lba=%d" % (gpt.current_lba, gpt.partition_entries_lba)
|
||||
assert gpt.partition_entries_lba > gpt.current_lba
|
||||
check = gpt.crc
|
||||
gpt.crc = 0
|
||||
actual = crc32(GPT_header.build(gpt))
|
||||
if actual != check:
|
||||
print "GPT crc check fail %d vs %d" % (actual, check)
|
||||
return None
|
||||
return gpt
|
||||
|
||||
def parse_lwvm(data, pageSize):
|
||||
try:
|
||||
hdr = LWVM_header.parse(data)
|
||||
if hdr.type != LWVM_MAGIC:
|
||||
print "LwVM magic mismatch"
|
||||
return
|
||||
tocheck = data[:44] + "\x00\x00\x00\x00" + data[48:0x1000]
|
||||
check = crc32(tocheck) & 0xffffffff
|
||||
if check != hdr.crc32:
|
||||
return None
|
||||
print "LwVM header CRC OK"
|
||||
partitions = hdr.LWVM_partitionRecord[:hdr.numPartitions]
|
||||
deviceSize=0
|
||||
#XXX: HAAAAAAAX
|
||||
for s in [8, 16, 32, 64, 128]:
|
||||
if hdr.mediaSize < (s* 1024*1024*1024):
|
||||
deviceSize = s
|
||||
break
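# Chunk map (assumption): entries tagged 0x0000 appear to belong to the system
# partition and 0x1000 to the data partition; with an assumed chunk size of
# deviceSize MB, the chunk index times that size gives the partition's starting LBA.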
|
||||
for i in xrange(len(hdr.chunks)):
|
||||
if hdr.chunks[i] == 0x0:
|
||||
lba0 = (i * deviceSize*1024*1024) / pageSize
|
||||
partitions[0].first_lba = lba0
|
||||
partitions[0].last_lba = lba0 + (partitions[0].end - partitions[0].begin) / pageSize
|
||||
elif hdr.chunks[i] == 0x1000:
|
||||
lbad = (i * deviceSize*1024*1024) / pageSize
|
||||
partitions[1].first_lba = lbad
|
||||
partitions[1].last_lba = lbad + (partitions[1].end - partitions[1].begin) / pageSize
|
||||
return partitions
|
||||
except:
|
||||
return None
|
||||
|
@ -0,0 +1,99 @@
|
||||
from progressbar import ProgressBar
|
||||
from usbmux import usbmux
|
||||
from util import hexdump, sizeof_fmt
|
||||
import datetime
|
||||
import hashlib
|
||||
import struct
|
||||
import os
|
||||
|
||||
CMD_DUMP = 0
|
||||
CMD_PROXY = 1
|
||||
kIOFlashStorageOptionRawPageIO = 0x002
|
||||
kIOFlashStorageOptionBootPageIO = 0x100
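# IOFlashStorageKit option flags; per NANDRemote.readPage below, boot-area pages are
# read with BootPageIO and carry no spare/meta bytes (spareSize is forced to 0).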
|
||||
|
||||
class IOFlashStorageKitClient(object):
|
||||
def __init__(self, udid=None, host="localhost", port=2000):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.connect(udid)
|
||||
|
||||
def connect(self, udid=None):
|
||||
mux = usbmux.USBMux()
|
||||
mux.process(1.0)
|
||||
if not mux.devices:
|
||||
print "Waiting for iOS device"
|
||||
while not mux.devices:
|
||||
mux.process(1.0)
|
||||
if not mux.devices:
|
||||
print "No device found"
|
||||
return
|
||||
dev = mux.devices[0]
|
||||
try:
|
||||
self.s = mux.connect(dev, self.port)
|
||||
except:
|
||||
raise Exception("Connexion to device %s port %d failed" % (dev.serial, self.port))
|
||||
|
||||
def send_command(self, cmd):
|
||||
return self.s.send(struct.pack("<L", cmd))
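# Wire protocol as implemented here: a 4-byte little-endian command word. For CMD_DUMP
# the kit replies with an 8-byte little-endian total size followed by the raw NAND
# stream (see dump_nand); CMD_PROXY switches to per-page read requests (NANDRemote.readPage).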
|
||||
|
||||
def dump_nand(self, filename):
|
||||
f = open(filename, "wb")
|
||||
self.send_command(CMD_DUMP)
|
||||
zz = self.s.recv(8)
|
||||
totalSize = struct.unpack("<Q", zz)[0]
|
||||
recvSize = 0
|
||||
print "Dumping %s NAND to %s" % (sizeof_fmt(totalSize), filename)
|
||||
pbar = ProgressBar(totalSize)
|
||||
pbar.start()
|
||||
h = hashlib.sha1()
|
||||
while recvSize < totalSize:
|
||||
pbar.update(recvSize)
|
||||
d = self.s.recv(8192*2)
|
||||
if not d or len(d) == 0:
|
||||
break
|
||||
h.update(d)
|
||||
f.write(d)
|
||||
recvSize += len(d)
|
||||
pbar.finish()
|
||||
f.close()
|
||||
print "NAND dump time : %s" % str(datetime.timedelta(seconds=pbar.seconds_elapsed))
|
||||
print "SHA1: %s" % h.hexdigest()
|
||||
if recvSize != totalSize:
|
||||
print "dump_nand FAIL"
|
||||
|
||||
class NANDRemote(object):
|
||||
def __init__(self, pageSize, spareSize, pagesPerBlock, bfn):
|
||||
self.spareSize = spareSize
|
||||
self.pageSize = pageSize
|
||||
self.pagesPerBlock = pagesPerBlock
|
||||
self.bootFromNand = bfn
|
||||
self.client = IOFlashStorageKitClient()
|
||||
self.client.send_command(CMD_PROXY)
|
||||
|
||||
def readPage(self, ce, page):
|
||||
options = 0
|
||||
spareSize = self.spareSize
|
||||
if self.bootFromNand and page < 16*self.pagesPerBlock:#XXX hardcoded for now
|
||||
options = kIOFlashStorageOptionBootPageIO
|
||||
spareSize = 0
|
||||
d = struct.pack("<LLLL", ce, page, spareSize, options)
|
||||
|
||||
self.client.s.send(d)
|
||||
|
||||
torecv = self.pageSize+8+spareSize
|
||||
d = ""
|
||||
while len(d) != torecv:
|
||||
zz = self.client.s.recv(torecv)
|
||||
if not zz:
|
||||
break
|
||||
d += zz
|
||||
pageData = d[:self.pageSize]
|
||||
spareData = d[self.pageSize:self.pageSize+spareSize]
|
||||
r1,r2 = struct.unpack("<LL", d[self.pageSize+spareSize:self.pageSize+spareSize+8])
|
||||
|
||||
if r1 == 0xe00002e5:
|
||||
return None, None
|
||||
#print ce, page, "%x" % r1, r2, pageData[:0x10].encode("hex"), spareData[:0x10].encode("hex")
|
||||
if spareData == "":
|
||||
spareData = "\xFF" * self.spareSize
|
||||
return spareData, pageData
|
@ -0,0 +1,83 @@
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
|
||||
#hardcoded iOS keys
|
||||
META_KEY = "92a742ab08c969bf006c9412d3cc79a5".decode("hex")
|
||||
FILESYSTEM_KEY = "f65dae950e906c42b254cc58fc78eece".decode("hex")
|
||||
|
||||
def next_power_of_two(z):
|
||||
i = 1
|
||||
while i < z:
|
||||
i <<= 1
|
||||
return i
|
||||
|
||||
def CEIL_DIVIDE(val, amt):
|
||||
return (((val) + (amt) - 1) / (amt))
|
||||
|
||||
#from openiboot/plat-s5l8920/h2fmi.c
|
||||
#blocks_per_ce, pages_per_block, bytes_per_page, bytes_per_spare, unk5, unk6, unk7, banks_per_ce, unk9
|
||||
#some values change in openiboot/plat-a4/h2fmi.c, but banks_per_ce is ok
|
||||
nand_chip_info = {
|
||||
0x7294D7EC : [ 0x1038, 0x80, 0x2000, 0x1B4, 0xC, 0, 8, 1, 0 ],
|
||||
0x72D5DEEC : [ 0x2070, 0x80, 0x2000, 0x1B4, 0xC, 0, 8, 2, 0 ],
|
||||
0x29D5D7EC : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 2, 2, 0 ],
|
||||
0x2994D5EC : [ 0x1000, 0x80, 0x1000, 0xDA, 8, 0, 2, 1, 0 ],
|
||||
0xB614D5EC : [ 0x1000, 0x80, 0x1000, 0x80, 4, 0, 2, 1, 0 ],
|
||||
0xB655D7EC : [ 0x2000, 0x80, 0x1000, 0x80, 4, 0, 2, 2, 0 ],
|
||||
0xB614D5AD : [ 0x1000, 0x80, 0x1000, 0x80, 4, 0, 3, 1, 0 ],
|
||||
0x3294E798 : [ 0x1004, 0x80, 0x2000, 0x1C0, 0x10, 0, 1, 1, 0 ],
|
||||
0xBA94D598 : [ 0x1000, 0x80, 0x1000, 0xDA, 8, 0, 1, 1, 0 ],
|
||||
0xBA95D798 : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 1, 2, 0 ],
|
||||
0x3294D798 : [ 0x1034, 0x80, 0x2000, 0x178, 8, 0, 1, 1, 0 ],
|
||||
0x3295DE98 : [ 0x2068, 0x80, 0x2000, 0x178, 8, 0, 1, 2, 0 ],
|
||||
0x3295EE98 : [ 0x2008, 0x80, 0x2000, 0x1C0, 0x18, 0, 1, 2, 0 ],
|
||||
0x3E94D789 : [ 0x2000, 0x80, 0x1000, 0xDA, 0x10, 0, 5, 1, 0 ],
|
||||
0x3ED5D789 : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 6, 2, 0 ],
|
||||
0x3ED5D72C : [ 0x2000, 0x80, 0x1000, 0xDA, 8, 0, 5, 2, 0 ],
|
||||
0x3E94D72C : [ 0x2000, 0x80, 0x1000, 0xDA, 0xC, 0, 7, 1, 0 ],
|
||||
0x4604682C : [ 0x1000, 0x100, 0x1000, 0xE0, 0xC, 0, 7, 1, 0 ],
|
||||
0x3294D745 : [ 0x1000, 0x80, 0x2000, 0x178, 8, 0, 9, 1, 0 ],
|
||||
0x3295DE45 : [ 0x2000, 0x80, 0x2000, 0x178, 8, 0, 9, 2, 0 ],
|
||||
0x32944845 : [ 0x1000, 0x80, 0x2000, 0x1C0, 8, 0, 9, 1, 0 ],
|
||||
0x32956845 : [ 0x2000, 0x80, 0x2000, 0x1C0, 8, 0, 9, 2, 0 ],
|
||||
}
|
||||
|
||||
#https://github.com/iDroid-Project/openiBoot/blob/master/openiboot/plat-a4/h2fmi.c
|
||||
def gen_h2fmi_hash_table():
|
||||
val = 0x50F4546A;
|
||||
h2fmi_hash_table = [0]*256
|
||||
for i in xrange(256):
|
||||
val = ((0x19660D * val) + 0x3C6EF35F) & 0xffffffff;
|
||||
for j in xrange(762):
|
||||
val = ((0x19660D * val) + 0x3C6EF35F) & 0xffffffff;
|
||||
h2fmi_hash_table[i] = val & 0xffffffff
|
||||
return h2fmi_hash_table
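# Linear-congruential table using the same constants as openiBoot's h2fmi driver;
# presumably consumed elsewhere to unwhiten per-page metadata on H2FMI controllers.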
|
||||
|
||||
# Page types (as defined in the spare data "type" bitfield)
|
||||
PAGETYPE_INDEX = 0x4 #Index block indicator
|
||||
PAGETYPE_LBN = 0x10 # User data
|
||||
PAGETYPE_FTL_CLEAN = 0x20 # FTL context (unmounted, clean)
|
||||
PAGETYPE_VFL = 0x80 #/ VFL context
|
||||
|
||||
SpareData = Struct("SpareData",
|
||||
ULInt32("lpn"),
|
||||
ULInt32("usn"),
|
||||
ULInt8("field_8"),
|
||||
ULInt8("type"),
|
||||
ULInt16("field_A")
|
||||
)
|
||||
|
||||
# Block status (as defined in the BlockStruct structure)
|
||||
BLOCKSTATUS_ALLOCATED = 0x1
|
||||
BLOCKSTATUS_FTLCTRL = 0x2
|
||||
BLOCKSTATUS_GC = 0x4
|
||||
BLOCKSTATUS_CURRENT = 0x8
|
||||
BLOCKSTATUS_FTLCTRL_SEL = 0x10
|
||||
BLOCKSTATUS_I_GC = 0x20
|
||||
BLOCKSTATUS_I_ALLOCATED = 0x40
|
||||
BLOCKSTATUS_I_CURRENT = 0x80
|
||||
BLOCKSTATUS_FREE = 0xFF
|
||||
|
||||
ERROR_ARG = 0x80000001
|
||||
ERROR_NAND = 0x80000002
|
||||
ERROR_EMPTY = 0x80000003
|
188
dump-imessages/iphone-dataprotection/python_scripts/nand/vfl.py
Normal file
@ -0,0 +1,188 @@
|
||||
from array import array
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
from structs import next_power_of_two, CEIL_DIVIDE, PAGETYPE_VFL
|
||||
import struct
|
||||
|
||||
"""
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/plat-s5l8900/includes/s5l8900/ftl.h
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/plat-s5l8900/ftl.c
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/plat-s5l8900/nand.c
|
||||
|
||||
static const NANDDeviceType SupportedDevices[] = {
|
||||
"""
|
||||
SupportedDevices = {0x2555D5EC: [8192, 128, 4, 64, 4, 2, 4, 2, 7744, 4, 6],
|
||||
0xB614D5EC: [4096, 128, 8, 128, 4, 2, 4, 2, 3872, 4, 6],
|
||||
0xB655D7EC: [8192, 128, 8, 128, 4, 2, 4, 2, 7744, 4, 6],
|
||||
0xA514D3AD: [4096, 128, 4, 64, 4, 2, 4, 2, 3872, 4, 6],
|
||||
0xA555D5AD: [8192, 128, 4, 64, 4, 2, 4, 2, 7744, 4, 6],
|
||||
0xB614D5AD: [4096, 128, 8, 128, 4, 2, 4, 2, 3872, 4, 6],
|
||||
0xB655D7AD: [8192, 128, 8, 128, 4, 2, 4, 2, 7744, 4, 6],
|
||||
0xA585D598: [8320, 128, 4, 64, 6, 2, 4, 2, 7744, 4, 6],
|
||||
0xBA94D598: [4096, 128, 8, 216, 6, 2, 4, 2, 3872, 8, 8],
|
||||
0xBA95D798: [8192, 128, 8, 216, 6, 2, 4, 2, 7744, 8, 8],
|
||||
0x3ED5D789: [8192, 128, 8, 216, 4, 2, 4, 2, 7744, 8, 8],
|
||||
0x3E94D589: [4096, 128, 8, 216, 4, 2, 4, 2, 3872, 8, 8],
|
||||
0x3ED5D72C: [8192, 128, 8, 216, 4, 2, 4, 2, 7744, 8, 8],
|
||||
0x3E94D52C: [4096, 128, 8, 216, 4, 2, 4, 2, 3872, 8, 8]
|
||||
}
|
||||
|
||||
_vfl_vfl_context = Struct("_vfl_vfl_context",
|
||||
ULInt32("usn_inc"),
|
||||
Array(3, ULInt16("control_block")),
|
||||
ULInt16("unk1"),
|
||||
ULInt32("usn_dec"),
|
||||
ULInt16("active_context_block"),
|
||||
ULInt16("next_context_page"),
|
||||
ULInt16("unk2"),
|
||||
ULInt16("field_16"),
|
||||
ULInt16("field_18"),
|
||||
ULInt16("num_reserved_blocks"),
|
||||
ULInt16("reserved_block_pool_start"),
|
||||
ULInt16("total_reserved_blocks"),
|
||||
Array(820, ULInt16("reserved_block_pool_map")),
|
||||
Array(282, ULInt8("bad_block_table")),
|
||||
Array(4, ULInt16("vfl_context_block")),
|
||||
ULInt16("remapping_schedule_start"),
|
||||
Array(0x48, ULInt8("unk3")),
|
||||
ULInt32("version"),
|
||||
ULInt32("checksum1"),
|
||||
ULInt32("checksum2")
|
||||
)
|
||||
|
||||
_vfl_vsvfl_spare_data = Struct("_vfl_vsvfl_spare_data",
|
||||
Union("foo",
|
||||
Struct("user",ULInt32("logicalPageNumber"),ULInt32("usn")),
|
||||
Struct("meta",ULInt32("usnDec"),ULInt16("idx"), ULInt8("field_6"), ULInt8("field_7"))
|
||||
),
|
||||
ULInt8("type2"),
|
||||
ULInt8("type1"),
|
||||
ULInt8("eccMark"),
|
||||
ULInt8("field_B"),
|
||||
)
|
||||
|
||||
def vfl_checksum(data):
|
||||
x = 0
|
||||
y = 0
|
||||
for z in array("I", data):
|
||||
x = (x + z) & 0xffffffff
|
||||
y = (y ^ z) & 0xffffffff
|
||||
return (x + 0xAABBCCDD) & 0xffffffff, (y ^ 0xAABBCCDD) & 0xffffffff
|
||||
|
||||
def vfl_check_checksum(ctx, ctxtype):
|
||||
c1, c2 = vfl_checksum(ctxtype.build(ctx)[:-8])
|
||||
return c1 == ctx.checksum1 and c2 == ctx.checksum2
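# The two trailing checksum fields (last 8 bytes of the built struct) are excluded
# from the summed/xored data and compared against the stored values.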
|
||||
|
||||
class VFL(object):
|
||||
def __init__(self, nand):
|
||||
self.nand = nand
|
||||
#XXX check
|
||||
self.banks_total = nand.nCEs * nand.banks_per_ce_physical
|
||||
self.num_ce = nand.nCEs
|
||||
self.banks_per_ce = nand.banks_per_ce_physical
|
||||
self.blocks_per_ce = nand.blocksPerCE
|
||||
self.pages_per_block = nand.pagesPerBlock
|
||||
self.pages_per_block_2 = next_power_of_two(self.pages_per_block)
|
||||
self.pages_per_sublk = self.pages_per_block * self.banks_per_ce * self.num_ce
|
||||
self.blocks_per_bank = self.blocks_per_ce / self.banks_per_ce
|
||||
self.blocks_per_bank_vfl = self.blocks_per_ce / self.banks_per_ce
|
||||
self.vendorType = nand.vendorType
|
||||
self.fs_start_block = 5
|
||||
|
||||
#field_4 = 5;
|
||||
if not SupportedDevices.has_key(nand.deviceReadId):
|
||||
raise Exception("VFL: unsupported device 0x%x" % nand.deviceReadId)
|
||||
userSuBlksTotal = self.userSuBlksTotal = SupportedDevices[nand.deviceReadId][8]#7744
|
||||
userPagesTotal = userSuBlksTotal * self.pages_per_sublk
|
||||
suBlksTotal = self.blocks_per_ce
|
||||
|
||||
FTLData_field_2 = suBlksTotal - userSuBlksTotal - 28
|
||||
print suBlksTotal, userSuBlksTotal, FTLData_field_2
|
||||
FTLData_field_4 = FTLData_field_2 + 5
|
||||
self.FTLData_field_4 = FTLData_field_4
|
||||
#FTLData_sysSuBlks = FTLData_field_2 + 4
|
||||
#FTLData_field_6 = 3
|
||||
#FTLData_field_8 = 23
|
||||
|
||||
self.vflContexts = []
|
||||
self.bbt = []
|
||||
self.current_version = 0
|
||||
self.context = None
|
||||
reserved_blocks = 0
|
||||
fs_start_block = reserved_blocks+10 #XXX
|
||||
for ce in xrange(self.num_ce):
|
||||
for b in xrange(reserved_blocks, fs_start_block):
|
||||
s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
|
||||
if not d:
|
||||
continue
|
||||
vflctx = _vfl_vfl_context.parse(d)
|
||||
if not vfl_check_checksum(vflctx, _vfl_vfl_context):
|
||||
vflctx = None
|
||||
continue
|
||||
break
|
||||
MostRecentVFLCxtBlock = -1
|
||||
minUsn = 0xFFFFFFFF
|
||||
for b in vflctx.vfl_context_block:
|
||||
s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
|
||||
if not d:
|
||||
continue
|
||||
if s.foo.meta.usnDec > 0 and s.foo.meta.usnDec <= minUsn:
|
||||
minUsn = s.foo.meta.usnDec;
|
||||
MostRecentVFLCxtBlock = b
|
||||
if MostRecentVFLCxtBlock == -1:
|
||||
print "MostRecentVFLCxtBlock == -1"
|
||||
return
|
||||
last = None
|
||||
for pageNum in xrange(0, self.pages_per_block, 1):
|
||||
s,d = nand.readMetaPage(ce, MostRecentVFLCxtBlock, pageNum, _vfl_vsvfl_spare_data)
|
||||
if not d:
|
||||
break
|
||||
vflctx = _vfl_vfl_context.parse(d)
|
||||
if vfl_check_checksum(vflctx, _vfl_vfl_context):
|
||||
last = vflctx
|
||||
if not last:
|
||||
raise Exception("VFL open FAIL 1")
|
||||
self.vflContexts.append(last)
|
||||
if last.version == 1 and last.usn_inc >= self.current_version:
|
||||
self.current_version = last.usn_inc
|
||||
self.context = last
|
||||
if not self.context:
|
||||
raise Exception("VFL open FAIL")
|
||||
|
||||
print "VFL context open OK"
|
||||
|
||||
def VFL_get_FTLCtrlBlock(self):
|
||||
for ctx in self.vflContexts:
|
||||
if ctx.usn_inc == self.current_version:
|
||||
return ctx.control_block
|
||||
|
||||
def vfl_is_good_block(self, bbt, block):
|
||||
if block > self.blocks_per_ce:
|
||||
raise Exception("vfl_is_good_block block %d out of bounds" % block)
|
||||
index = block/8
|
||||
return ((bbt[index / 8] >> (7 - (index % 8))) & 0x1) == 0x1
|
||||
|
||||
def virtual_block_to_physical_block(self, ce, pBlock):
|
||||
if self.vfl_is_good_block(self.vflContexts[ce].bad_block_table, pBlock):
|
||||
return pBlock
|
||||
ctx = self.vflContexts[ce]
|
||||
for pwDesPbn in xrange(0, ctx.num_reserved_blocks):
|
||||
if ctx.reserved_block_pool_map[pwDesPbn] == pBlock:
|
||||
if pwDesPbn > self.blocks_per_ce:
|
||||
raise Exception("Destination physical block for remapping is greater than number of blocks per bank!")
|
||||
return ctx.reserved_block_pool_start + pwDesPbn
|
||||
print "Bad block %d not remapped" % pBlock
|
||||
return pBlock
|
||||
|
||||
def virtual_page_number_to_virtual_address(self, vpn):
|
||||
vbank = vpn % self.num_ce
|
||||
vblock = vpn / self.pages_per_sublk
|
||||
vpage = (vpn / self.num_ce) % self.pages_per_block
|
||||
return vbank, vblock, vpage
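# Consecutive virtual pages rotate across CEs (vbank = vpn % num_ce); vblock selects
# the superblock and vpage the page within that block for the selected bank.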
|
||||
|
||||
def read_single_page(self, vpn, key=None, lpn=None):
|
||||
vpn += self.pages_per_sublk * self.FTLData_field_4
|
||||
vbank, vblock, vpage = self.virtual_page_number_to_virtual_address(vpn)
|
||||
pblock = self.virtual_block_to_physical_block(vbank, vblock)
|
||||
#print "VFL read_single_page %d => %d, %d" % (vpn,ce,pPage)
|
||||
return self.nand.readPage(vbank, pblock*self.nand.pagesPerBlock + vpage, key, lpn)
|
@ -0,0 +1,193 @@
|
||||
from construct import *
|
||||
from structs import next_power_of_two, PAGETYPE_VFL, CEIL_DIVIDE
|
||||
from vfl import vfl_check_checksum, _vfl_vsvfl_spare_data
|
||||
|
||||
"""
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/vfl-vsvfl/vsvfl.c
|
||||
https://github.com/iDroid-Project/openiBoot/blob/master/vfl-vsvfl/includes/vfl/vsvfl.h
|
||||
"""
|
||||
|
||||
_vfl_vsvfl_context = Struct("_vfl_vsvfl_context",
|
||||
ULInt32("usn_inc"),
|
||||
ULInt32("usn_dec"),
|
||||
ULInt32("ftl_type"),
|
||||
ULInt16("usn_block"),
|
||||
ULInt16("usn_page"),
|
||||
ULInt16("active_context_block"),
|
||||
ULInt16("write_failure_count"),
|
||||
ULInt16("bad_block_count"),
|
||||
Array(4, ULInt8("replaced_block_count")),
|
||||
ULInt16("num_reserved_blocks"),
|
||||
ULInt16("field_1C"),
|
||||
ULInt16("total_reserved_blocks"),
|
||||
Array(6, ULInt8("field_20")),
|
||||
Array(820, ULInt16("reserved_block_pool_map")),
|
||||
Array(4, ULInt16("vfl_context_block")),
|
||||
ULInt16("usable_blocks_per_bank"),
|
||||
ULInt16("reserved_block_pool_start"),
|
||||
Array(3, ULInt16("control_block")),
|
||||
ULInt16("scrub_list_length"),
|
||||
Array(20, ULInt16("scrub_list")),
|
||||
Array(4, ULInt32("field_6CA")),
|
||||
ULInt32("vendor_type"),
|
||||
Array(204, ULInt8("field_6DE")),
|
||||
ULInt16("remapping_schedule_start"),
|
||||
Array(0x48, ULInt8("unk3")),
|
||||
ULInt32("version"),
|
||||
ULInt32("checksum1"),
|
||||
ULInt32("checksum2")
|
||||
)
|
||||
|
||||
|
||||
class VSVFL(object):
|
||||
def __init__(self, nand):
|
||||
self.nand = nand
|
||||
self.banks_per_ce_vfl = 1
|
||||
if self.nand.vendorType in [0x100010, 0x100014, 0x120014, 0x150011]:
|
||||
self.banks_per_ce_vfl = 2
|
||||
self.banks_total = nand.nCEs * self.banks_per_ce_vfl
|
||||
self.num_ce = nand.nCEs
|
||||
self.banks_per_ce = nand.banks_per_ce_physical
|
||||
self.blocks_per_ce = nand.blocksPerCE
|
||||
self.pages_per_block = nand.pagesPerBlock
|
||||
self.pages_per_block_2 = next_power_of_two(self.pages_per_block)
|
||||
self.pages_per_sublk = self.pages_per_block * self.banks_per_ce_vfl * self.num_ce
|
||||
self.blocks_per_bank = self.blocks_per_ce / self.banks_per_ce
|
||||
self.blocks_per_bank_vfl = self.blocks_per_ce / self.banks_per_ce_vfl
|
||||
self.vendorType = nand.vendorType
|
||||
if self.vendorType == 0x10001:
|
||||
self.virtual_to_physical = self.virtual_to_physical_10001
|
||||
elif self.vendorType == 0x150011:
|
||||
self.virtual_to_physical = self.virtual_to_physical_100014
|
||||
elif self.vendorType in [0x100010, 0x100014, 0x120014]:
|
||||
self.virtual_to_physical = self.virtual_to_physical_150011
|
||||
else:
|
||||
raise Exception("VSVFL: unsupported vendor 0x%x" % self.vendorType)
|
||||
self.bank_address_space = nand.bank_address_space
|
||||
self.vflContexts = []
|
||||
self.bbt = []
|
||||
self.current_version = 0
|
||||
reserved_blocks = 0
|
||||
if self.nand.bfn:
|
||||
reserved_blocks = 16
|
||||
fs_start_block = reserved_blocks+16 #XXX
|
||||
for ce in xrange(self.num_ce):
|
||||
vflctx = None
|
||||
for b in xrange(reserved_blocks, fs_start_block):
|
||||
s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
|
||||
if not d:
|
||||
continue
|
||||
vflctx = _vfl_vsvfl_context.parse(d)
|
||||
if not vfl_check_checksum(vflctx, _vfl_vsvfl_context):
|
||||
vflctx = None
|
||||
continue
|
||||
break
|
||||
if not vflctx:
|
||||
raise Exception("Unable to find VSVFL context for CE %d" % ce)
|
||||
MostRecentVFLCxtBlock = -1
|
||||
minUsn = 0xFFFFFFFF
|
||||
for b in vflctx.vfl_context_block:
|
||||
s, d = nand.readMetaPage(ce, b, 0, _vfl_vsvfl_spare_data)
|
||||
if not d or s.type1 != PAGETYPE_VFL:
|
||||
continue
|
||||
if s.foo.meta.usnDec > 0 and s.foo.meta.usnDec <= minUsn:
|
||||
minUsn = s.foo.meta.usnDec;
|
||||
MostRecentVFLCxtBlock = b
|
||||
if MostRecentVFLCxtBlock == -1:
|
||||
print "MostRecentVFLCxtBlock == -1"
|
||||
return
|
||||
last = None
|
||||
for pageNum in xrange(0, self.pages_per_block, 1):
|
||||
s,d = nand.readMetaPage(ce, MostRecentVFLCxtBlock, pageNum, _vfl_vsvfl_spare_data)
|
||||
if not d or s.type1 != PAGETYPE_VFL:
|
||||
break
|
||||
last = d
|
||||
vflctx = _vfl_vsvfl_context.parse(last)
|
||||
if not vfl_check_checksum(vflctx, _vfl_vsvfl_context):
|
||||
print "VSVFL checksum FAIL"
|
||||
self.vflContexts.append(vflctx)
|
||||
if vflctx.version == 2 and vflctx.usn_inc >= self.current_version:
|
||||
self.current_version = vflctx.usn_inc
|
||||
self.context = vflctx
|
||||
if not self.context:
|
||||
raise Exception("VSVFL open FAIL")
|
||||
|
||||
num_reserved = self.vflContexts[0].reserved_block_pool_start
|
||||
num_non_reserved = self.blocks_per_bank_vfl - num_reserved
|
||||
for ce in xrange(self.num_ce):
|
||||
bbt = [0xFF] * (CEIL_DIVIDE(self.blocks_per_ce, 8))
|
||||
ctx = self.vflContexts[ce]
|
||||
for bank in xrange(0, self.banks_per_ce_vfl):
|
||||
for i in xrange(0, num_non_reserved):
|
||||
mapEntry = ctx.reserved_block_pool_map[bank*num_non_reserved + i]
|
||||
if mapEntry == 0xFFF0:
|
||||
continue
|
||||
if mapEntry < self.blocks_per_ce:
|
||||
pBlock = mapEntry
|
||||
elif mapEntry > 0xFFF0:
|
||||
pBlock = self.virtual_block_to_physical_block(ce + bank * self.num_ce, num_reserved + i)
|
||||
else:
|
||||
print "VSVFL: bad map table"
|
||||
bbt[pBlock / 8] &= ~(1 << (pBlock % 8))
|
||||
self.bbt.append(bbt)
|
||||
print "VSVFL context open OK"
|
||||
|
||||
def VFL_get_FTLCtrlBlock(self):
|
||||
for ctx in self.vflContexts:
|
||||
if ctx.usn_inc == self.current_version:
|
||||
return ctx.control_block
|
||||
|
||||
def virtual_to_physical_10001(self, vBank, vPage):
|
||||
return vBank, vPage
|
||||
|
||||
def virtual_to_physical_100014(self, vBank, vPage):
|
||||
pBank = vBank / self.num_ce;
|
||||
pPage = ((self.pages_per_block - 1) & vPage) | (2 * (~(self.pages_per_block - 1) & vPage))
|
||||
if (pBank & 1):
|
||||
pPage |= self.pages_per_block
|
||||
return vBank % self.num_ce, pPage
|
||||
|
||||
def virtual_to_physical_150011(self, vBank, vPage):
|
||||
pBlock = 2 * (vPage / self.pages_per_block)
|
||||
if(vBank % (2 * self.num_ce) >= self.num_ce):
|
||||
pBlock += 1
|
||||
return vBank % self.num_ce, self.pages_per_block * pBlock | (vPage % 128)
|
||||
|
||||
def virtual_block_to_physical_block(self, vBank, vBlock):
|
||||
ce, pPage = self.virtual_to_physical(vBank, self.pages_per_block * vBlock)
|
||||
return pPage / self.pages_per_block
|
||||
|
||||
def vfl_is_good_block(self, bbt, block):
|
||||
if block > self.blocks_per_ce:
|
||||
raise Exception("vfl_is_good_block block %d out of bounds" % block)
|
||||
return (bbt[block / 8] & (1 << (block % 8))) != 0
|
||||
|
||||
def remap_block(self, ce, pBlock):
|
||||
if self.vfl_is_good_block(self.bbt[ce], pBlock):
|
||||
return pBlock
|
||||
ctx = self.vflContexts[ce]
|
||||
for pwDesPbn in xrange(0, self.blocks_per_ce - ctx.reserved_block_pool_start * self.banks_per_ce_vfl):
|
||||
if ctx.reserved_block_pool_map[pwDesPbn] == pBlock:
|
||||
vBank = ce + self.num_ce * (pwDesPbn / (self.blocks_per_bank_vfl - ctx.reserved_block_pool_start))
|
||||
vBlock = ctx.reserved_block_pool_start + (pwDesPbn % (self.blocks_per_bank_vfl - ctx.reserved_block_pool_start))
|
||||
z = self.virtual_block_to_physical_block(vBank, vBlock)
|
||||
#print "remapped block %d => %d" % (pBlock, z)
|
||||
return z
|
||||
print "Bad block %d not remapped" % pBlock
|
||||
return pBlock
|
||||
|
||||
def virtual_page_number_to_physical(self, vpn):
|
||||
vBank = vpn % self.banks_total
|
||||
ce = vBank % self.nand.nCEs
|
||||
|
||||
pBlock = self.virtual_block_to_physical_block(vBank, vpn / self.pages_per_sublk)
|
||||
pBlock = self.remap_block(ce, pBlock)
|
||||
bank_offset = self.bank_address_space * (pBlock / self.blocks_per_bank)
|
||||
page = self.pages_per_block_2 * (bank_offset + (pBlock % self.blocks_per_bank)) \
|
||||
+ ((vpn % self.pages_per_sublk) / self.banks_total)
|
||||
return ce, page
|
||||
|
||||
def read_single_page(self, vpn, key=None, lpn=None):
|
||||
ce, pPage = self.virtual_page_number_to_physical(vpn)
|
||||
#print "VFL read_single_page %d => %d, %d" % (vpn,ce,pPage)
|
||||
return self.nand.readPage(ce, pPage, key, lpn)
|
@ -0,0 +1,357 @@
|
||||
from array import array
|
||||
from construct.core import Struct, Union
|
||||
from construct.macros import *
|
||||
from progressbar import ProgressBar
|
||||
from structs import *
|
||||
import struct
|
||||
|
||||
|
||||
#https://github.com/iDroid-Project/openiBoot/blob/master/openiboot/ftl-yaftl/yaftl.c
|
||||
YAFTL_CXT = Struct("YAFTL_CXT",
|
||||
String("version", 4),
|
||||
ULInt32("unknCalculatedValue0"),
|
||||
ULInt32("totalPages"),
|
||||
ULInt32("latestUserBlock"),
|
||||
ULInt32("cxt_unkn0_usn"),
|
||||
ULInt32("latestIndexBlock"),
|
||||
ULInt32("maxIndexUsn"),
|
||||
ULInt32("blockStatsField4"),
|
||||
ULInt32("blockStatsField10"),
|
||||
ULInt32("numAllocatedBlocks"),
|
||||
ULInt32("numIAllocatedBlocks"),
|
||||
ULInt32("unk184_0xA"),
|
||||
Array(10, ULInt32("cxt_unkn1")),
|
||||
ULInt32("field_58"),
|
||||
ULInt16("tocArrayLength"),
|
||||
ULInt16("tocPagesPerBlock"),
|
||||
ULInt16("tocEntriesPerPage"),
|
||||
ULInt16("unkn_0x2A"),
|
||||
ULInt16("userPagesPerBlock"),
|
||||
ULInt16("unk64"),
|
||||
Array(11, ULInt32("cxt_unkn2")),
|
||||
ULInt8("unk188_0x63"),
|
||||
)
|
||||
|
||||
TOCStruct = Struct("TOCStruct",
|
||||
ULInt32("indexPage"),
|
||||
ULInt16("cacheNum"),
|
||||
ULInt16("TOCUnkMember2"),
|
||||
)
|
||||
|
||||
BlockStats = Struct("BlockStats",
|
||||
ULInt32("numAllocated"),
|
||||
ULInt32("field_4"),
|
||||
ULInt32("numValidDPages"),
|
||||
ULInt32("numIAllocated"),
|
||||
ULInt32("field_10"),
|
||||
ULInt32("numValidIPages"),
|
||||
ULInt32("numFree"),
|
||||
ULInt32("field_1C"),
|
||||
)
|
||||
|
||||
|
||||
class YAFTL(object):
|
||||
def __init__(self, vfl, usn=0):
|
||||
self.vfl = vfl
|
||||
self.lpnToVpn = None
|
||||
bytesPerPage = vfl.nand.pageSize
|
||||
numBlocks = vfl.context.usable_blocks_per_bank
|
||||
self.blankPage = bytesPerPage * "\x00"
|
||||
self.numBlocks = numBlocks
|
||||
self.tocPagesPerBlock = vfl.pages_per_sublk * 4 / bytesPerPage
|
||||
if vfl.pages_per_sublk * 4 % bytesPerPage:
|
||||
self.tocPagesPerBlock += 1
|
||||
self.tocEntriesPerPage = bytesPerPage / 4
|
||||
self.tocArrayLength = CEIL_DIVIDE(vfl.pages_per_sublk * numBlocks * 4, bytesPerPage)
|
||||
self.nPagesTocPageIndices = CEIL_DIVIDE(self.tocArrayLength * 4, bytesPerPage)
|
||||
self.nPagesBlockStatuses = CEIL_DIVIDE(numBlocks * 1, bytesPerPage)
|
||||
self.nPagesBlockReadCounts = CEIL_DIVIDE(numBlocks * 2, bytesPerPage)
|
||||
self.nPagesBlockEraseCounts = CEIL_DIVIDE(numBlocks * 4, bytesPerPage)
|
||||
self.nPagesBlockValidPagesDNumbers = self.nPagesBlockReadCounts
|
||||
self.nPagesBlockValidPagesINumbers = self.nPagesBlockReadCounts
|
||||
self.ctrlBlockPageOffset = self.nPagesTocPageIndices \
|
||||
+ self.nPagesBlockStatuses \
|
||||
+ self.nPagesBlockReadCounts \
|
||||
+ self.nPagesBlockEraseCounts \
|
||||
+ self.nPagesBlockValidPagesDNumbers \
|
||||
+ self.nPagesBlockValidPagesINumbers \
|
||||
+ 2 * self.tocPagesPerBlock \
|
||||
+ 2
|
||||
self.totalPages = (self.numBlocks - 8) * (self.vfl.pages_per_sublk - self.tocPagesPerBlock)# - unknCalculatedValue0
|
||||
self.userPagesPerBlock = self.vfl.pages_per_sublk - self.tocPagesPerBlock
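# Each superblock reserves tocPagesPerBlock pages at its end for the block TOC
# (BTOC, page index -> LPN, see readBTOCPages); the remaining userPagesPerBlock
# pages hold user data.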
|
||||
maxUsn = 0
|
||||
ftlCtrlBlock = -1
|
||||
for b in self.vfl.VFL_get_FTLCtrlBlock():
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk)
|
||||
if not d:
|
||||
continue
|
||||
if usn and s.usn > usn:
|
||||
break
|
||||
if s.usn > maxUsn:
|
||||
maxUsn = s.usn
|
||||
ftlCtrlBlock = b
|
||||
if ftlCtrlBlock == -1 or not maxUsn:
|
||||
print "ftlCtrlBlock not found, restore needed"
|
||||
self.YAFTL_restore()
|
||||
return
|
||||
i = 0
|
||||
maxUsn = 0
|
||||
while i < self.vfl.pages_per_sublk - self.ctrlBlockPageOffset:
|
||||
s,d = self.YAFTL_readPage(ftlCtrlBlock*self.vfl.pages_per_sublk + i + self.ctrlBlockPageOffset)
|
||||
if not d:
|
||||
if self.YAFTL_readCxtInfo(ftlCtrlBlock*self.vfl.pages_per_sublk + i):
|
||||
return
|
||||
print "YaFTL_readCxtInfo FAIL, restore needed maxUsn=%d" % maxUsn
|
||||
self.YAFTL_restore()
|
||||
return
|
||||
if s and s.usn > maxUsn:
|
||||
maxUsn = s.usn
|
||||
i += self.ctrlBlockPageOffset + 1
|
||||
print "YaFTL open fail"
|
||||
self.YAFTL_restore()
|
||||
|
||||
def readBTOCPages(self, block, maxVal):
|
||||
data = ""
|
||||
for i in xrange(self.tocPagesPerBlock):
|
||||
s,d = self.YAFTL_readPage((block+1) * self.vfl.pages_per_sublk - self.tocPagesPerBlock + i)
|
||||
if not s:
|
||||
return None
|
||||
data += d
|
||||
btoc = array("I",data)
|
||||
for i in xrange(len(btoc)):
|
||||
if btoc[i] > maxVal:
|
||||
btoc[i] = 0xFFFFFFFF
|
||||
return btoc
|
||||
|
||||
def YAFTL_restore(self):
|
||||
self.lpnToVpn = self.vfl.nand.loadCachedData("yaftlrestore")
|
||||
if self.lpnToVpn:
|
||||
print "Found cached FTL restore information"
|
||||
return
|
||||
userBlocks = {}
|
||||
indexBlocks = {}
|
||||
print "FTL restore in progress"
|
||||
pbar = ProgressBar(self.numBlocks)
|
||||
pbar.start()
|
||||
for b in xrange(0, self.numBlocks):
|
||||
pbar.update(b)
|
||||
#read first page in block; if empty then block is empty
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk + 0)
|
||||
if not s:
|
||||
continue
|
||||
if s.type == PAGETYPE_INDEX:
|
||||
indexBlocks[s.usn] = b
|
||||
elif s.type == PAGETYPE_LBN:
|
||||
if userBlocks.has_key(s.usn):
|
||||
print "Two blocks with same USN, something is weird"
|
||||
userBlocks[s.usn] = b
|
||||
elif s.type == PAGETYPE_FTL_CLEAN:
|
||||
pass
|
||||
pbar.finish()
|
||||
lpnToVpn = {}
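# Blocks are visited in decreasing USN order and only unset LPNs are filled in,
# so the newest mapping for each LPN wins.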
|
||||
for usn in sorted(userBlocks.keys(), reverse=True):
|
||||
b = userBlocks[usn]
|
||||
btoc = self.readBTOCPages(b, self.totalPages)
|
||||
if btoc:
|
||||
for i in xrange(self.userPagesPerBlock-1,-1, -1):
|
||||
if not lpnToVpn.has_key(btoc[i]):
|
||||
lpnToVpn[btoc[i]] = b * self.vfl.pages_per_sublk + i
|
||||
else:
|
||||
print "BTOC not found for block %d (usn %d), scanning all pages" % (b, usn)
|
||||
i = 0
|
||||
for p in xrange(self.vfl.pages_per_sublk - self.tocPagesPerBlock -1, -1, -1):
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk + p)
|
||||
if s:
|
||||
i+= 1
|
||||
if s and not lpnToVpn.has_key(s.lpn):
|
||||
lpnToVpn[s.lpn] = b * self.vfl.pages_per_sublk + p
|
||||
print "%d used pages in block" % i
|
||||
self.vfl.nand.cacheData("yaftlrestore", lpnToVpn)
|
||||
self.lpnToVpn = lpnToVpn
|
||||
return lpnToVpn
|
||||
|
||||
def YAFTL_readCxtInfo(self, page):
|
||||
s,d = self.YAFTL_readPage(page)
|
||||
if not s or s.type != PAGETYPE_FTL_CLEAN:
|
||||
return False
|
||||
ctx = YAFTL_CXT.parse(d)
|
||||
ctx.spareUsn = s.usn
|
||||
if ctx.version != "CX01":
|
||||
print "Wrong FTL version %s" % ctx.version
|
||||
return False
|
||||
self.usn = s.usn
|
||||
pageToRead = page + 1;
|
||||
userTOCBuffer = self.YAFTL_read_n_Page(pageToRead, self.tocPagesPerBlock)
|
||||
if not userTOCBuffer:
|
||||
raise(Exception("userTOCBuffer"))
|
||||
pageToRead += self.tocPagesPerBlock
|
||||
indexTOCBuffer = self.YAFTL_read_n_Page(pageToRead, self.tocPagesPerBlock)
|
||||
pageToRead += self.tocPagesPerBlock + 1
|
||||
tocArrayIndexPages = self.YAFTL_read_n_Page(pageToRead, self.nPagesTocPageIndices)
|
||||
self.tocArrayIndexPages = array("I", tocArrayIndexPages)
|
||||
assert self.tocArrayIndexPages.itemsize == 4
|
||||
self.indexCache = {}
|
||||
pageToRead += self.nPagesTocPageIndices
|
||||
|
||||
if False: #we don't care, we just want to read
|
||||
blockStatuses = self.YAFTL_read_n_Page(pageToRead, self.nPagesBlockStatuses)
|
||||
pageToRead += self.nPagesBlockStatuses
|
||||
blockReadCounts = self.YAFTL_read_n_Page(pageToRead, self.nPagesBlockReadCounts)
|
||||
pageToRead += self.nPagesBlockReadCounts
|
||||
blockEraseCounts = self.YAFTL_read_n_Page(pageToRead, self.nPagesBlockEraseCounts)
|
||||
pageToRead += self.nPagesBlockEraseCounts
|
||||
validPagesINo = self.YAFTL_read_n_Page(pageToRead, self.nPagesBlockValidPagesINumbers)
|
||||
pageToRead += self.nPagesBlockValidPagesINumbers
|
||||
validPagesDNo = self.YAFTL_read_n_Page(pageToRead, self.nPagesBlockValidPagesDNumbers)
|
||||
|
||||
print "YaFTL context OK, version=%s maxIndexUsn=%d context usn=%d" % (ctx.version, ctx.maxIndexUsn, self.usn)
|
||||
return True
|
||||
|
||||
def YAFTL_read_n_Page(self, page, n, failIfBlank=False):
|
||||
r = ""
|
||||
for i in xrange(0, n):
|
||||
s,d = self.YAFTL_readPage(page +i)
|
||||
if not d:
|
||||
if failIfBlank:
|
||||
return
|
||||
return r
|
||||
r += d
|
||||
return r
|
||||
|
||||
def YAFTL_readPage(self, page, key=META_KEY, lpn=None):
|
||||
return self.vfl.read_single_page(page, key, lpn)
|
||||
|
||||
def build_lpn_to_vpn(self):
|
||||
lpnToVpn = {}
|
||||
for p in xrange(self.totalPages):
|
||||
x = self.translateLPNtoVPN(p)
|
||||
if x != 0xFFFFFFFF:
|
||||
lpnToVpn[p] = x
|
||||
self.vfl.nand.cacheData("currentftl", lpnToVpn)
|
||||
return lpnToVpn
|
||||
|
||||
def translateLPNtoVPN(self, lpn):
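# Two-level lookup: tocArrayIndexPages maps lpn / tocEntriesPerPage to the vpn of an
# index page, and that index page holds the per-LPN vpn entries (0xffffffff means
# unmapped). Parsed index pages are cached in indexCache.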
|
||||
if self.lpnToVpn:
|
||||
return self.lpnToVpn.get(lpn, 0xFFFFFFFF)
|
||||
tocPageNum = (lpn) / self.tocEntriesPerPage
|
||||
indexPage = self.tocArrayIndexPages[tocPageNum]
|
||||
if indexPage == 0xffffffff:
|
||||
return 0xffffffff
|
||||
#print "indexPage %x" % indexPage
|
||||
if self.indexCache.has_key(indexPage):
|
||||
tocPageBuffer = self.indexCache[indexPage]
|
||||
else:
|
||||
s,tocPageBuffer = self.YAFTL_readPage(indexPage)
|
||||
if not tocPageBuffer:
|
||||
print "tocPageBuffer fail"
|
||||
return 0xffffffff
|
||||
assert s.type == PAGETYPE_INDEX
|
||||
tocPageBuffer = array("I", tocPageBuffer)
|
||||
self.indexCache[indexPage] = tocPageBuffer
|
||||
|
||||
tocEntry = tocPageBuffer[lpn % self.tocEntriesPerPage]
|
||||
return tocEntry
|
||||
|
||||
def readLPN(self, lpn, key=None):#, nPages):
|
||||
vpn = self.translateLPNtoVPN(lpn)
|
||||
if vpn == 0xffffffff:
|
||||
return self.blankPage
|
||||
#print "tocEntry %d" % tocEntry
|
||||
#print "FTL %d => %d" % (lpn, vpn)
|
||||
s,d = self.YAFTL_readPage(vpn, key, lpn)
|
||||
if d == None:
|
||||
return self.blankPage
|
||||
if s.lpn != lpn:
|
||||
raise Exception("YAFTL translation FAIL spare lpn=%d vs expected %d" % (s.lpn, lpn))
|
||||
return d
|
||||
|
||||
def YAFTL_lookup1(self):
|
||||
hax = self.vfl.nand.loadCachedData("YAFTL_lookup1")
|
||||
if hax:
|
||||
print "Found cached FTL lookup table"
|
||||
return hax
|
||||
userBlocks = {}
|
||||
indexBlocks = {}
|
||||
print "Building FTL lookup table v1"
|
||||
pbar = ProgressBar(self.numBlocks)
|
||||
pbar.start()
|
||||
for b in xrange(0, self.numBlocks):
|
||||
pbar.update(b)
|
||||
#read first page in block; if empty then block is empty
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk + 0)
|
||||
if not s:
|
||||
continue
|
||||
if s.type == PAGETYPE_INDEX:
|
||||
indexBlocks[s.usn] = b
|
||||
elif s.type == PAGETYPE_LBN:
|
||||
if userBlocks.has_key(s.usn):
|
||||
print "Two blocks with same USN, something is weird"
|
||||
userBlocks[s.usn] = b
|
||||
elif s.type == PAGETYPE_FTL_CLEAN:
|
||||
pass#print b, "ftl block"
|
||||
pbar.finish()
|
||||
lpnToVpn = {}
|
||||
for usn in sorted(userBlocks.keys(), reverse=False):
|
||||
b = userBlocks[usn]
|
||||
btoc = self.readBTOCPages(b, self.totalPages)
|
||||
#print usn, b
|
||||
if btoc:
|
||||
for i in xrange(self.userPagesPerBlock-1,-1, -1):
|
||||
lpnToVpn.setdefault(btoc[i], []).append(b * self.vfl.pages_per_sublk + i)
|
||||
else:
|
||||
#print "btoc not found for block %d (usn %d), scanning all pages" % (b, usn)
|
||||
i = 0
|
||||
usn = -1
|
||||
for p in xrange(self.vfl.pages_per_sublk - self.tocPagesPerBlock -1, -1, -1):
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk + p)
|
||||
if not s:
|
||||
break
|
||||
i+= 1
|
||||
if usn == -1:
|
||||
usn = s.usn
|
||||
if usn != s.usn:
|
||||
#print "Two usns in same block %d %d" % (usn, s.usn)
|
||||
usn = s.usn
|
||||
lpnToVpn.setdefault(s.lpn, []).append(b * self.vfl.pages_per_sublk + p)
|
||||
#print "%d used pages in block" % i
|
||||
#self.vfl.nand.cacheData("YAFTL_lookup1", (lpnToVpn, userBlocks))
|
||||
return lpnToVpn, userBlocks
|
||||
|
||||
def YAFTL_hax2(self):
|
||||
hax = self.vfl.nand.loadCachedData("YAFTL_hax2")
|
||||
if hax:
|
||||
print "Found cached FTL HAX2 information"
|
||||
return hax
|
||||
|
||||
print "FTL hax2 in progress"
|
||||
pbar = ProgressBar(self.numBlocks)
|
||||
pbar.start()
|
||||
lpnToVpn = {}
|
||||
for b in xrange(0, self.numBlocks):
|
||||
pbar.update(b)
|
||||
#read first page in block; if empty then block is empty (right?)
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk + 0)
|
||||
if not s:
|
||||
continue
|
||||
if s.type == PAGETYPE_LBN:
|
||||
i = 0
|
||||
usn = -1
|
||||
for p in xrange(0, self.vfl.pages_per_sublk - self.tocPagesPerBlock):
|
||||
s,d = self.YAFTL_readPage(b * self.vfl.pages_per_sublk + p)
|
||||
if not s:
|
||||
break
|
||||
lpnToVpn.setdefault(s.lpn, {}).setdefault(s.usn, []).append(b * self.vfl.pages_per_sublk + p)
|
||||
i+= 1
|
||||
|
||||
pbar.finish()
|
||||
self.vfl.nand.cacheData("YAFTL_hax2", lpnToVpn)
|
||||
return lpnToVpn
|
||||
|
||||
def block_lpn_to_vpn(self, block):
|
||||
res = {}
|
||||
for p in xrange(0, self.vfl.pages_per_sublk - self.tocPagesPerBlock):
|
||||
s,d = self.YAFTL_readPage(block * self.vfl.pages_per_sublk + p)
|
||||
if not s:
|
||||
break
|
||||
res[s.lpn] = block * self.vfl.pages_per_sublk + p
|
||||
return res
|
@ -0,0 +1,246 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# usbmux.py - usbmux client library for Python
|
||||
#
|
||||
# Copyright (C) 2009 Hector Martin "marcan" <hector@marcansoft.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 2 or version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
import socket, struct, select, sys
|
||||
|
||||
try:
|
||||
import plistlib
|
||||
haveplist = True
|
||||
except:
|
||||
haveplist = False
|
||||
|
||||
class MuxError(Exception):
|
||||
pass
|
||||
|
||||
class MuxVersionError(MuxError):
|
||||
pass
|
||||
|
||||
class SafeStreamSocket:
|
||||
def __init__(self, address, family):
|
||||
self.sock = socket.socket(family, socket.SOCK_STREAM)
|
||||
self.sock.connect(address)
|
||||
def send(self, msg):
|
||||
totalsent = 0
|
||||
while totalsent < len(msg):
|
||||
sent = self.sock.send(msg[totalsent:])
|
||||
if sent == 0:
|
||||
raise MuxError("socket connection broken")
|
||||
totalsent = totalsent + sent
|
||||
def recv(self, size):
|
||||
msg = ''
|
||||
while len(msg) < size:
|
||||
chunk = self.sock.recv(size-len(msg))
|
||||
if chunk == '':
|
||||
raise MuxError("socket connection broken")
|
||||
msg = msg + chunk
|
||||
return msg
|
||||
|
||||
class MuxDevice(object):
|
||||
def __init__(self, devid, usbprod, serial, location):
|
||||
self.devid = devid
|
||||
self.usbprod = usbprod
|
||||
self.serial = serial
|
||||
self.location = location
|
||||
def __str__(self):
|
||||
return "<MuxDevice: ID %d ProdID 0x%04x Serial '%s' Location 0x%x>"%(self.devid, self.usbprod, self.serial, self.location)
|
||||
|
||||
class BinaryProtocol(object):
|
||||
TYPE_RESULT = 1
|
||||
TYPE_CONNECT = 2
|
||||
TYPE_LISTEN = 3
|
||||
TYPE_DEVICE_ADD = 4
|
||||
TYPE_DEVICE_REMOVE = 5
|
||||
VERSION = 0
|
||||
def __init__(self, socket):
|
||||
self.socket = socket
|
||||
self.connected = False
|
||||
|
||||
def _pack(self, req, payload):
|
||||
if req == self.TYPE_CONNECT:
|
||||
return struct.pack("IH", payload['DeviceID'], payload['PortNumber']) + "\x00\x00"
|
||||
elif req == self.TYPE_LISTEN:
|
||||
return ""
|
||||
else:
|
||||
raise ValueError("Invalid outgoing request type %d"%req)
|
||||
|
||||
def _unpack(self, resp, payload):
|
||||
if resp == self.TYPE_RESULT:
|
||||
return {'Number':struct.unpack("I", payload)[0]}
|
||||
elif resp == self.TYPE_DEVICE_ADD:
|
||||
devid, usbpid, serial, pad, location = struct.unpack("IH256sHI", payload)
|
||||
serial = serial.split("\0")[0]
|
||||
return {'DeviceID': devid, 'Properties': {'LocationID': location, 'SerialNumber': serial, 'ProductID': usbpid}}
|
||||
elif resp == self.TYPE_DEVICE_REMOVE:
|
||||
devid = struct.unpack("I", payload)[0]
|
||||
return {'DeviceID': devid}
|
||||
else:
|
||||
raise MuxError("Invalid incoming request type %d"%req)
|
||||
|
||||
def sendpacket(self, req, tag, payload={}):
|
||||
payload = self._pack(req, payload)
|
||||
if self.connected:
|
||||
raise MuxError("Mux is connected, cannot issue control packets")
|
||||
length = 16 + len(payload)
|
||||
data = struct.pack("IIII", length, self.VERSION, req, tag) + payload
|
||||
self.socket.send(data)
|
||||
def getpacket(self):
|
||||
if self.connected:
|
||||
raise MuxError("Mux is connected, cannot issue control packets")
|
||||
dlen = self.socket.recv(4)
|
||||
dlen = struct.unpack("I", dlen)[0]
|
||||
body = self.socket.recv(dlen - 4)
|
||||
version, resp, tag = struct.unpack("III",body[:0xc])
|
||||
if version != self.VERSION:
|
||||
raise MuxVersionError("Version mismatch: expected %d, got %d"%(self.VERSION,version))
|
||||
payload = self._unpack(resp, body[0xc:])
|
||||
return (resp, tag, payload)
|
||||
|
||||
class PlistProtocol(BinaryProtocol):
|
||||
TYPE_RESULT = "Result"
|
||||
TYPE_CONNECT = "Connect"
|
||||
TYPE_LISTEN = "Listen"
|
||||
TYPE_DEVICE_ADD = "Attached"
|
||||
TYPE_DEVICE_REMOVE = "Detached" #???
|
||||
TYPE_PLIST = 8
|
||||
VERSION = 1
|
||||
def __init__(self, socket):
|
||||
if not haveplist:
|
||||
raise Exception("You need the plistlib module")
|
||||
BinaryProtocol.__init__(self, socket)
|
||||
|
||||
def _pack(self, req, payload):
|
||||
return payload
|
||||
|
||||
def _unpack(self, resp, payload):
|
||||
return payload
|
||||
|
||||
def sendpacket(self, req, tag, payload={}):
|
||||
payload['ClientVersionString'] = 'usbmux.py by marcan'
|
||||
if isinstance(req, int):
|
||||
req = [self.TYPE_CONNECT, self.TYPE_LISTEN][req-2]
|
||||
payload['MessageType'] = req
|
||||
payload['ProgName'] = 'tcprelay'
|
||||
BinaryProtocol.sendpacket(self, self.TYPE_PLIST, tag, plistlib.writePlistToString(payload))
|
||||
def getpacket(self):
|
||||
resp, tag, payload = BinaryProtocol.getpacket(self)
|
||||
if resp != self.TYPE_PLIST:
|
||||
raise MuxError("Received non-plist type %d"%resp)
|
||||
payload = plistlib.readPlistFromString(payload)
|
||||
return payload['MessageType'], tag, payload
|
||||
|
||||
class MuxConnection(object):
|
||||
def __init__(self, socketpath, protoclass):
|
||||
self.socketpath = socketpath
|
||||
if sys.platform in ['win32', 'cygwin']:
|
||||
family = socket.AF_INET
|
||||
address = ('127.0.0.1', 27015)
|
||||
else:
|
||||
family = socket.AF_UNIX
|
||||
address = self.socketpath
|
||||
self.socket = SafeStreamSocket(address, family)
|
||||
self.proto = protoclass(self.socket)
|
||||
self.pkttag = 1
|
||||
self.devices = []
|
||||
|
||||
def _getreply(self):
|
||||
while True:
|
||||
resp, tag, data = self.proto.getpacket()
|
||||
if resp == self.proto.TYPE_RESULT:
|
||||
return tag, data
|
||||
else:
|
||||
raise MuxError("Invalid packet type received: %d"%resp)
|
||||
def _processpacket(self):
|
||||
resp, tag, data = self.proto.getpacket()
|
||||
if resp == self.proto.TYPE_DEVICE_ADD:
|
||||
self.devices.append(MuxDevice(data['DeviceID'], data['Properties']['ProductID'], data['Properties']['SerialNumber'], data['Properties']['LocationID']))
|
||||
elif resp == self.proto.TYPE_DEVICE_REMOVE:
|
||||
for dev in self.devices:
|
||||
if dev.devid == data['DeviceID']:
|
||||
self.devices.remove(dev)
|
||||
elif resp == self.proto.TYPE_RESULT:
|
||||
raise MuxError("Unexpected result: %d"%resp)
|
||||
else:
|
||||
raise MuxError("Invalid packet type received: %d"%resp)
|
||||
def _exchange(self, req, payload={}):
|
||||
mytag = self.pkttag
|
||||
self.pkttag += 1
|
||||
self.proto.sendpacket(req, mytag, payload)
|
||||
recvtag, data = self._getreply()
|
||||
if recvtag != mytag:
|
||||
raise MuxError("Reply tag mismatch: expected %d, got %d"%(mytag, recvtag))
|
||||
return data['Number']
|
||||
|
||||
def listen(self):
|
||||
ret = self._exchange(self.proto.TYPE_LISTEN)
|
||||
if ret != 0:
|
||||
raise MuxError("Listen failed: error %d"%ret)
|
||||
def process(self, timeout=None):
|
||||
if self.proto.connected:
|
||||
raise MuxError("Socket is connected, cannot process listener events")
|
||||
rlo, wlo, xlo = select.select([self.socket.sock], [], [self.socket.sock], timeout)
|
||||
if xlo:
|
||||
self.socket.sock.close()
|
||||
raise MuxError("Exception in listener socket")
|
||||
if rlo:
|
||||
self._processpacket()
|
||||
def connect(self, device, port):
|
||||
ret = self._exchange(self.proto.TYPE_CONNECT, {'DeviceID':device.devid, 'PortNumber':((port<<8) & 0xFF00) | (port>>8)})
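# usbmuxd expects the TCP port in network byte order, hence the byte swap of the
# 16-bit port value above.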
|
||||
if ret != 0:
|
||||
raise MuxError("Connect failed: error %d"%ret)
|
||||
self.proto.connected = True
|
||||
return self.socket.sock
|
||||
def close(self):
|
||||
self.socket.sock.close()
|
||||
|
||||
class USBMux(object):
|
||||
def __init__(self, socketpath=None):
|
||||
if socketpath is None:
|
||||
# same default socket path on all platforms; win32 connects over TCP inside MuxConnection
socketpath = "/var/run/usbmuxd"
|
||||
self.socketpath = socketpath
|
||||
self.listener = MuxConnection(socketpath, BinaryProtocol)
|
||||
try:
|
||||
self.listener.listen()
|
||||
self.version = 0
|
||||
self.protoclass = BinaryProtocol
|
||||
except MuxVersionError:
|
||||
self.listener = MuxConnection(socketpath, PlistProtocol)
|
||||
self.listener.listen()
|
||||
self.protoclass = PlistProtocol
|
||||
self.version = 1
|
||||
self.devices = self.listener.devices
|
||||
def process(self, timeout=None):
|
||||
self.listener.process(timeout)
|
||||
def connect(self, device, port):
|
||||
connector = MuxConnection(self.socketpath, self.protoclass)
|
||||
return connector.connect(device, port)
|
||||
|
||||
if __name__ == "__main__":
|
||||
mux = USBMux()
|
||||
print "Waiting for devices..."
|
||||
if not mux.devices:
|
||||
mux.process(0.1)
|
||||
while True:
|
||||
print "Devices:"
|
||||
for dev in mux.devices:
|
||||
print dev
|
||||
mux.process()
|
@ -0,0 +1,123 @@
|
||||
import glob
|
||||
import plistlib
|
||||
import os
|
||||
from bplist import BPlistReader
|
||||
import cPickle
|
||||
import gzip
|
||||
|
||||
def read_file(filename):
|
||||
f = open(filename, "rb")
|
||||
data = f.read()
|
||||
f.close()
|
||||
return data
|
||||
|
||||
def write_file(filename,data):
|
||||
f = open(filename, "wb")
|
||||
f.write(data)
|
||||
f.close()
|
||||
|
||||
def makedirs(dirs):
|
||||
try:
|
||||
os.makedirs(dirs)
|
||||
except:
|
||||
pass
|
||||
|
||||
def getHomePath(foldername, filename):
|
||||
home = os.path.expanduser('~')
|
||||
folderpath = os.path.join(home, foldername)
|
||||
if not os.path.exists(folderpath):
|
||||
makedirs(folderpath)
|
||||
return os.path.join(folderpath, filename)
|
||||
|
||||
def readHomeFile(foldername, filename):
|
||||
path = getHomePath(foldername, filename)
|
||||
if not os.path.exists(path):
|
||||
return None
|
||||
return read_file(path)
|
||||
|
||||
#return path to HOME+foldername+filename
|
||||
def writeHomeFile(foldername, filename, data):
|
||||
filepath = getHomePath(foldername, filename)
|
||||
write_file(filepath, data)
|
||||
return filepath
|
||||
|
||||
def readPlist(filename):
|
||||
f = open(filename,"rb")
|
||||
d = f.read(16)
|
||||
f.close()
|
||||
if d.startswith("bplist"):
|
||||
return BPlistReader.plistWithFile(filename)
|
||||
else:
|
||||
return plistlib.readPlist(filename)
|
||||
|
||||
def parsePlist(s):
|
||||
if s.startswith("bplist"):
|
||||
return BPlistReader.plistWithString(s)
|
||||
else:
|
||||
return plistlib.readPlistFromString(s)
|
||||
|
||||
#http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
|
||||
def sizeof_fmt(num):
|
||||
for x in ['bytes','KB','MB','GB','TB']:
|
||||
if num < 1024.0:
|
||||
return "%d%s" % (num, x)
|
||||
num /= 1024.0
|
||||
|
||||
#http://www.5dollarwhitebox.org/drupal/node/84
|
||||
def convert_bytes(bytes):
|
||||
bytes = float(bytes)
|
||||
if bytes >= 1099511627776:
|
||||
terabytes = bytes / 1099511627776
|
||||
size = '%.2fT' % terabytes
|
||||
elif bytes >= 1073741824:
|
||||
gigabytes = bytes / 1073741824
|
||||
size = '%.2fG' % gigabytes
|
||||
elif bytes >= 1048576:
|
||||
megabytes = bytes / 1048576
|
||||
size = '%.2fM' % megabytes
|
||||
elif bytes >= 1024:
|
||||
kilobytes = bytes / 1024
|
||||
size = '%.2fK' % kilobytes
|
||||
else:
|
||||
size = '%.2fb' % bytes
|
||||
return size
|
||||
|
||||
def xor_strings(a,b):
|
||||
r=""
|
||||
for i in xrange(len(a)):
|
||||
r+= chr(ord(a[i])^ord(b[i]))
|
||||
return r
|
||||
|
||||
hex = lambda data: " ".join("%02X" % ord(i) for i in data)
|
||||
ascii = lambda data: "".join(c if 31 < ord(c) < 127 else "." for c in data)
|
||||
|
||||
def hexdump(d):
|
||||
for i in xrange(0,len(d),16):
|
||||
data = d[i:i+16]
|
||||
print "%08X | %s | %s" % (i, hex(data).ljust(47), ascii(data))
|
||||
|
||||
def search_plist(directory, matchDict):
|
||||
for p in map(os.path.normpath, glob.glob(directory + "/*.plist")):
|
||||
try:
|
||||
d = plistlib.readPlist(p)
|
||||
ok = True
|
||||
for k,v in matchDict.items():
|
||||
if d.get(k) != v:
|
||||
ok = False
|
||||
break
|
||||
if ok:
|
||||
print "Using plist file %s" % p
|
||||
return d
|
||||
except:
|
||||
continue
|
||||
|
||||
def save_pickle(filename,data):
|
||||
f = gzip.open(filename,"wb")
|
||||
cPickle.dump(data, f, cPickle.HIGHEST_PROTOCOL)
|
||||
f.close()
|
||||
|
||||
def load_pickle(filename):
|
||||
f = gzip.open(filename,"rb")
|
||||
data = cPickle.load(f)
|
||||
f.close()
|
||||
return data
|
@ -0,0 +1,29 @@
|
||||
|
||||
def print_table(title, headers, rows):
|
||||
widths = []
|
||||
|
||||
for i in xrange(len(headers)):
|
||||
z = map(len, [str(row[i]) for row in rows])
|
||||
z.append(len(headers[i]))
|
||||
widths.append(max(z))
|
||||
|
||||
width = sum(widths) + len(headers) + 1
|
||||
print "-"* width
|
||||
print "|" + title.center(width-2) + "|"
|
||||
print "-"* width
|
||||
hline = "|"
|
||||
for i in xrange(len(headers)):
|
||||
hline += headers[i].ljust(widths[i]) + "|"
|
||||
print hline
|
||||
|
||||
print "-"* width
|
||||
for row in rows:
|
||||
line = "|"
|
||||
for i in xrange(len(row)):
|
||||
line += str(row[i]).ljust(widths[i]) + "|"
|
||||
print line
|
||||
|
||||
if len(rows) == 0:
|
||||
print "|" + "No entries".center(width-2) + "|"
|
||||
print "-"* width
|
||||
print ""
|
Some files were not shown because too many files have changed in this diff