#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description: Mini shell for browsing an NTFS volume
#
# Author:
#  Alberto Solino (@agsolino)
#
#
# Reference for:
#  Structure. Quick and dirty implementation.. just for fun.. ;)
#
# NOTE: Lots of info (mainly the structs) taken from the NTFS-3G project..
#
# TODO
# [] Parse the attributes list attribute. It is unknown what would happen now if
# we face a highly fragmented file that will have many attributes that won't fit
# in the MFT Record
# [] Support compressed, encrypted and sparse files
#
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import struct
import argparse
import cmd
import ntpath
# If you wanna have readline like functionality in Windows, install pyreadline
try:
    import pyreadline as readline
except ImportError:
    import readline
from six import PY2, text_type
from datetime import datetime
from impacket.examples import logger
from impacket import version
from impacket.structure import Structure


def pretty_print(x):
    """Return *x* unchanged when it is a visible ASCII character, else '.'.

    Used to build the printable right-hand column of hexdump() output.
    """
    visible = ('0123456789abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ')
    if x in visible:
        return x
    return '.'
def hexdump(data):
    """Print a classic hex dump of *data*: offset, 16 hex bytes, ASCII column."""
    # NOTE: str() on a Py3 bytes object dumps its repr (b'..'); kept as-is.
    text = str(data)
    total = len(text)
    # Visible-ASCII test inlined (same character set the module's pretty_print uses).
    visible = ('0123456789abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ')
    offset = 0
    while offset < total:
        print("%04x " % offset, end=' ')
        for col in range(16):
            pos = offset + col
            if pos < total:
                print("%02X" % ord(text[pos]), end=' ')
            else:
                print(" ", end=' ')
            # Extra gap after the 8th byte for readability.
            if col % 16 == 7:
                print("", end=' ')
        print(" ", end=' ')
        print(''.join(ch if ch in visible else '.' for ch in text[offset:offset + 16]))
        offset += 16

# Reserved/fixed MFTs
FIXED_MFTS = 16

# Attribute types
UNUSED = 0
STANDARD_INFORMATION = 0x10
ATTRIBUTE_LIST = 0x20
FILE_NAME = 0x30
OBJECT_ID = 0x40
SECURITY_DESCRIPTOR = 0x50
VOLUME_NAME = 0x60
VOLUME_INFORMATION = 0x70
DATA = 0x80
INDEX_ROOT = 0x90
INDEX_ALLOCATION = 0xa0
BITMAP = 0xb0
REPARSE_POINT = 0xc0
EA_INFORMATION = 0xd0
EA = 0xe0
PROPERTY_SET = 0xf0
LOGGED_UTILITY_STREAM = 0x100
FIRST_USER_DEFINED_ATTRIBUTE = 0x1000
END = 0xffffffff

# Attribute flags
ATTR_IS_COMPRESSED = 0x0001
ATTR_COMPRESSION_MASK = 0x00ff
ATTR_IS_ENCRYPTED = 0x4000
ATTR_IS_SPARSE = 0x8000

# FileName type flags
FILE_NAME_POSIX = 0x00
FILE_NAME_WIN32 = 0x01
FILE_NAME_DOS = 0x02
FILE_NAME_WIN32_AND_DOS = 0x03

# MFT Record flags
MFT_RECORD_IN_USE = 0x0001
MFT_RECORD_IS_DIRECTORY = 0x0002
MFT_RECORD_IS_4 = 0x0004
MFT_RECORD_IS_VIEW_INDEX = 0x0008
MFT_REC_SPACE_FILLER = 0xfffff

# File Attribute Flags
FILE_ATTR_READONLY = 0x0001
FILE_ATTR_HIDDEN = 0x0002
FILE_ATTR_SYSTEM = 0x0004
FILE_ATTR_DIRECTORY = 0x0010
FILE_ATTR_ARCHIVE = 0x0020
FILE_ATTR_DEVICE = 0x0040
FILE_ATTR_NORMAL = 0x0080
FILE_ATTR_TEMPORARY = 0x0100
FILE_ATTR_SPARSE_FILE = 0x0200
FILE_ATTR_REPARSE_POINT = 0x0400
FILE_ATTR_COMPRESSED = 0x0800
FILE_ATTR_OFFLINE = 0x1000
FILE_ATTR_NOT_CONTENT_INDEXED = 0x2000
FILE_ATTR_ENCRYPTED = 0x4000
FILE_ATTR_VALID_FLAGS = 0x7fb7
FILE_ATTR_VALID_SET_FLAGS = 0x31a7
FILE_ATTR_I30_INDEX_PRESENT = 0x10000000
FILE_ATTR_VIEW_INDEX_PRESENT = 0x20000000

# NTFS System files (fixed MFT record numbers of the metadata files)
FILE_MFT = 0
FILE_MFTMirr = 1
FILE_LogFile = 2
FILE_Volume = 3
FILE_AttrDef = 4
FILE_Root = 5
FILE_Bitmap = 6
FILE_Boot = 7
FILE_BadClus = 8
FILE_Secure = 9
FILE_UpCase = 10
FILE_Extend = 11

# Index Header Flags
SMALL_INDEX = 0
LARGE_INDEX = 1
LEAF_NODE = 0
INDEX_NODE = 1
NODE_MASK = 0

# Index Entry Flags
INDEX_ENTRY_NODE = 1
INDEX_ENTRY_END = 2
INDEX_ENTRY_SPACE_FILLER = 0xffff


class NTFS_BPB(Structure):
    # BIOS Parameter Block, embedded in the boot sector.
    structure = (
        ('BytesPerSector','<H=0'),
        ('SectorsPerCluster','B=0'),
        ('ReservedSectors','<H=0'),
        ('Reserved','3s=b""'),
        ('Reserved2','2s=b""'),
        ('MediaDescription','B=0'),
        ('Reserved3','2s=b""'),
        ('Reserved4','<H=0'),
        ('Reserved5','<H=0'),
        ('Reserved6','<L=0'),
        ('Reserved7','4s=b""'),
    )

class NTFS_EXTENDED_BPB(Structure):
    # Extended BPB: locates the $MFT and defines record/index buffer sizes.
    # ClusterPerFileRecord/ClusterPerIndexBuffer are signed: a negative value
    # means the size is 2**(-value) bytes instead of a cluster count.
    structure = (
        ('Reserved','4s=b""'),
        ('TotalSectors','<Q=0'),
        ('MFTClusterNumber','<Q=0'),
        ('MFTMirrClusterNumber','<Q=0'),
        ('ClusterPerFileRecord','b=0'),
        ('Reserved2','3s=b""'),
        ('ClusterPerIndexBuffer','<b=0'),
        ('Reserved3','3s=b""'),
        ('VolumeSerialNumber','8s=b""'),
        ('CheckSum','4s=b""'),
    )

class NTFS_BOOT_SECTOR(Structure):
    # First 512 bytes of the volume; BPB/ExtendedBPB are parsed separately.
    structure = (
        ('JmpInstr','3s=b""'),
        ('OEM_ID','8s=b""'),
        ('BPB','25s=b""'),
        ('ExtendedBPB','48s=b""'),
        ('Bootstrap','426s=b""'),
        ('EOS','<H=0'),
    )

class NTFS_MFT_RECORD(Structure):
    # FILE record header of an MFT entry ("FILE" magic).
    structure = (
        ('MagicLabel','4s=b""'),
        ('USROffset','<H=0'),           # Update Sequence Records Offset
        ('USRSize','<H=0'),             # Update Sequence Records Size
        ('LogSeqNum','<Q=0'),
        ('SeqNum','<H=0'),
        ('LinkCount','<H=0'),
        ('AttributesOffset','<H=0'),
        ('Flags','<H=0'),
        ('BytesInUse','<L=0'),
        ('BytesAllocated','<L=0'),
        ('BaseMftRecord','<Q=0'),
        ('NextAttrInstance','<H=0'),
        ('Reserved','<H=0'),
        ('RecordNumber','<L=0'),
    )

class NTFS_ATTRIBUTE_RECORD(Structure):
    # Common header shared by resident and non-resident attributes.
    commonHdr = (
        ('Type','<L=0'),
        ('Length','<L=0'),
        ('NonResident','B=0'),
        ('NameLength','B=0'),
        ('NameOffset','<H=0'),
        ('Flags','<H=0'),
        ('Instance','<H=0'),
    )
    structure = ()

class NTFS_ATTRIBUTE_RECORD_NON_RESIDENT(Structure):
    # Header tail for attributes whose data lives outside the MFT record.
    structure = (
        ('LowestVCN','<Q=0'),
        ('HighestVCN','<Q=0'),
        ('DataRunsOffset','<H=0'),
        ('CompressionUnit','<H=0'),
        ('Reserved1','4s=""'),          # NOTE(review): default lacks the b prefix, unlike sibling fields — only matters when packing
        ('AllocatedSize','<Q=0'),
        ('DataSize','<Q=0'),
        ('InitializedSize','<Q=0'),
#        ('CompressedSize','<Q=0'),
    )

class NTFS_ATTRIBUTE_RECORD_RESIDENT(Structure):
    # Header tail for attributes stored inline in the MFT record.
    structure = (
        ('ValueLen','<L=0'),
        ('ValueOffset','<H=0'),
        ('Flags','B=0'),
        ('Reserved','B=0'),
    )

class NTFS_FILE_NAME_ATTR(Structure):
    # $FILE_NAME attribute value; FileName is UTF-16LE, FileNameLen in chars.
    structure = (
        ('ParentDirectory','<Q=0'),
        ('CreationTime','<Q=0'),
        ('LastDataChangeTime','<Q=0'),
        ('LastMftChangeTime','<Q=0'),
        ('LastAccessTime','<Q=0'),
        ('AllocatedSize','<Q=0'),
        ('DataSize','<Q=0'),
        ('FileAttributes','<L=0'),
        ('EaSize','<L=0'),
        ('FileNameLen','B=0'),
        ('FileNameType','B=0'),
        ('_FileName','_-FileName','self["FileNameLen"]*2'),
        ('FileName',':'),
    )

class NTFS_STANDARD_INFORMATION(Structure):
    # $STANDARD_INFORMATION attribute value (timestamps are FILETIME).
    structure = (
        ('CreationTime','<Q=0'),
        ('LastDataChangeTime','<Q=0'),
        ('LastMftChangeTime','<Q=0'),
        ('LastAccessTime','<Q=0'),
        ('FileAttributes','<L=0'),
    )

class NTFS_INDEX_HEADER(Structure):
    # Common index header used by both $INDEX_ROOT and INDX blocks.
    structure = (
        ('EntriesOffset','<L=0'),
        ('IndexLength','<L=0'),
        ('AllocatedSize','<L=0'),
        ('Flags','B=0'),
        ('Reserved','3s=b""'),
    )

class NTFS_INDEX_ROOT(Structure):
    # $INDEX_ROOT attribute value (the root node of a directory B-tree).
    structure = (
        ('Type','<L=0'),
        ('CollationRule','<L=0'),
        ('IndexBlockSize','<L=0'),
        ('ClustersPerIndexBlock','B=0'),
        ('Reserved','3s=b""'),
        ('Index',':',NTFS_INDEX_HEADER),
    )


class NTFS_INDEX_ALLOCATION(Structure):
    # INDX record header ("INDX" magic) of an index allocation block.
    structure = (
        ('Magic','4s=b""'),
        ('USROffset','<H=0'),           # Update Sequence Records Offset
        ('USRSize','<H=0'),             # Update Sequence Records Size
        ('Lsn','<Q=0'),
        ('IndexVcn','<Q=0'),
        ('Index',':',NTFS_INDEX_HEADER),
    )

class NTFS_INDEX_ENTRY_HEADER(Structure):
    structure = (
        ('IndexedFile','<Q=0'),
        ('Length','<H=0'),
        ('KeyLength','<H=0'),
        ('Flags','<H=0'),
        ('Reserved','<H=0'),
    )

class NTFS_INDEX_ENTRY(Structure):
    # One directory index entry; Key is usually an NTFS_FILE_NAME_ATTR blob,
    # Vcn is present only when the entry points at a sub-node (Flags & 1).
    alignment = 8
    structure = (
        ('EntryHeader',':',NTFS_INDEX_ENTRY_HEADER),
        ('_Key','_-Key','self["EntryHeader"]["KeyLength"]'),
        ('Key',':'),
        ('_Vcn','_-Vcn','(self["EntryHeader"]["Flags"] & 1)*8'),
        ('Vcn',':')
    )

class NTFS_DATA_RUN(Structure):
    # Decoded data run: a contiguous extent of clusters of a non-resident
    # attribute. Not an on-disk layout; filled in by parseDataRuns().
    structure = (
        ('LCN','<q=0'),
        ('Clusters','<Q=0'),
        ('StartVCN','<Q=0'),
        ('LastVCN','<Q=0'),
    )

def getUnixTime(t):
    """Convert a Windows FILETIME (100ns ticks since 1601) to Unix seconds."""
    t -= 116444736000000000
    t //= 10000000
    return t


class Attribute:
    """Base wrapper around an attribute record found inside an MFT record."""
    def __init__(self, iNode, data):
        self.AttributeName = None
        self.NTFSVolume = iNode.NTFSVolume
        self.AttributeHeader = NTFS_ATTRIBUTE_RECORD(data)
        # Named attributes carry a UTF-16LE name right after the header.
        if self.AttributeHeader['NameLength'] > 0 and self.AttributeHeader['Type'] != END:
            self.AttributeName = data[self.AttributeHeader['NameOffset']:][:self.AttributeHeader['NameLength']*2].decode('utf-16le')

    def getFlags(self):
        return self.AttributeHeader['Flags']

    def getName(self):
        return self.AttributeName

    def isNonResident(self):
        return self.AttributeHeader['NonResident']

    def dump(self):
        return self.AttributeHeader.dump()

    def getTotalSize(self):
        # Total on-disk length of this attribute record (header + value).
        return self.AttributeHeader['Length']

    def getType(self):
        return self.AttributeHeader['Type']

class AttributeResident(Attribute):
    """Attribute whose value is stored inline inside the MFT record."""
    def __init__(self, iNode, data):
        logging.debug("Inside AttributeResident: iNode: %s" % iNode.INodeNumber)
        Attribute.__init__(self,iNode,data)
        self.ResidentHeader = NTFS_ATTRIBUTE_RECORD_RESIDENT(data[len(self.AttributeHeader):])
        self.AttrValue = data[self.ResidentHeader['ValueOffset']:][:self.ResidentHeader['ValueLen']]

    def dump(self):
        return self.ResidentHeader.dump()

    def getFlags(self):
        return self.ResidentHeader['Flags']

    def getValue(self):
        return self.AttrValue

    def read(self,offset,length):
        # Resident data is already in memory; just slice it.
        logging.debug("Inside Read: offset: %d, length: %d" %(offset,length))
        return self.AttrValue[offset:][:length]

    def getDataSize(self):
        return len(self.AttrValue)

class AttributeNonResident(Attribute):
    """Attribute whose value lives in clusters described by data runs."""
    def __init__(self, iNode, data):
        logging.debug("Inside AttributeNonResident: iNode: %s" % iNode.INodeNumber)
        Attribute.__init__(self,iNode,data)
        self.NonResidentHeader = NTFS_ATTRIBUTE_RECORD_NON_RESIDENT(data[len(self.AttributeHeader):])
        # Raw (still encoded) data runs blob.
        self.AttrValue = data[self.NonResidentHeader['DataRunsOffset']:][:self.NonResidentHeader['AllocatedSize']]
        self.DataRuns = []
        self.ClusterSize = 0
        self.parseDataRuns()

    def dump(self):
        return self.NonResidentHeader.dump()

    def getDataSize(self):
        return self.NonResidentHeader['InitializedSize']

    def getValue(self):
        # Non-resident data has no inline value.
        return None

    def parseDataRuns(self):
        """Decode the run-list into NTFS_DATA_RUN entries (absolute LCNs).

        Each run starts with a size byte: low nibble = #bytes of the cluster
        count, high nibble = #bytes of the (signed, relative) LCN offset.
        A 0x00 size byte terminates the list.
        """
        value = self.AttrValue
        if value is not None:
            VCN = 0
            LCN = 0
            LCNOffset = 0
            while value[0:1] != b'\x00':
                # Offsets are relative to the previous run's LCN.
                LCN += LCNOffset
                dr = NTFS_DATA_RUN()

                size = struct.unpack('B',(value[0:1]))[0]

                value = value[1:]

                lengthBytes = size & 0x0F
                offsetBytes = size >> 4

                # Cluster count: little-endian, zero-extended to 8 bytes.
                length = value[:lengthBytes]
                length = struct.unpack('<Q', value[:lengthBytes]+b'\x00'*(8-len(length)))[0]
                value = value[lengthBytes:]

                # LCN offset is signed: sign-extend with 0xff when the top bit is set.
                fillWith = b'\x00'
                if struct.unpack('B',value[offsetBytes-1:offsetBytes])[0] & 0x80:
                    fillWith = b'\xff'
                LCNOffset = value[:offsetBytes]+fillWith*(8-len(value[:offsetBytes]))
                LCNOffset = struct.unpack('<q',LCNOffset)[0]

                value = value[offsetBytes:]

                dr['LCN'] = LCN+LCNOffset
                dr['Clusters'] = length
                dr['StartVCN'] = VCN
                dr['LastVCN'] = VCN + length -1

                VCN += length
                self.DataRuns.append(dr)

                if len(value) == 0:
                    break

    def readClusters(self, clusters, lcn):
        """Read *clusters* clusters starting at logical cluster *lcn*.

        Returns None on EOF.
        """
        logging.debug("Inside ReadClusters: clusters:%d, lcn:%d" % (clusters,lcn))
        if lcn == -1:
            # Sparse run: no backing clusters.
            # NOTE(review): returns a str of NULs, not bytes — on Py3 this
            # would break the bytes concatenation in readVCN; confirm.
            return '\x00'*clusters*self.ClusterSize
        self.NTFSVolume.volumeFD.seek(lcn*self.ClusterSize,0)
        buf = self.NTFSVolume.volumeFD.read(clusters*self.ClusterSize)
        while len(buf) < clusters*self.ClusterSize:
            buf+= self.NTFSVolume.volumeFD.read((clusters*self.ClusterSize)-len(buf))

        if len(buf) == 0:
            return None

        return buf

    def readVCN(self, vcn, numOfClusters):
        """Read up to *numOfClusters* clusters starting at virtual cluster *vcn*,
        walking the data runs and translating VCNs to LCNs."""
        logging.debug("Inside ReadVCN: vcn: %d, numOfClusters: %d" % (vcn,numOfClusters))
        buf = b''
        clustersLeft = numOfClusters
        for dr in self.DataRuns:
            if (vcn >= dr['StartVCN']) and (vcn <= dr['LastVCN']):

                vcnsToRead = dr['LastVCN'] - vcn + 1

                # Are we requesting to read more data outside this DataRun?
                if numOfClusters > vcnsToRead:
                    # Yes
                    clustersToRead = vcnsToRead
                else:
                    clustersToRead = numOfClusters

                tmpBuf = self.readClusters(clustersToRead,dr['LCN']+(vcn-dr['StartVCN']))
                if tmpBuf is not None:
                    buf += tmpBuf
                    clustersLeft -= clustersToRead
                    vcn += clustersToRead
                else:
                    break
            if clustersLeft == 0:
                break
        return buf

    def read(self,offset,length):
        """Read *length* bytes at byte *offset*, handling partial first/last
        clusters. Returns None when nothing could be read."""
        logging.debug("Inside Read: offset: %d, length: %d" %(offset,length))

        buf = b''
        curLength = length
        self.ClusterSize = self.NTFSVolume.BPB['BytesPerSector']*self.NTFSVolume.BPB['SectorsPerCluster']

        # Given the offset, let's calculate what VCN should be the first one to read
        vcnToStart = offset // self.ClusterSize
        #vcnOffset = self.ClusterSize - (offset % self.ClusterSize)

        # Do we have to read partial VCNs?
        if offset % self.ClusterSize:
            # Read the whole VCN
            # NOTE(review): 'is b""' is an identity check against a literal;
            # '== b""' would be the robust comparison.
            bufTemp = self.readVCN(vcnToStart, 1)
            if bufTemp is b'':
                # Something went wrong
                return None
            buf = bufTemp[offset % self.ClusterSize:]
            curLength -= len(buf)
            vcnToStart += 1

        # Finished?
        if curLength <= 0:
            return buf[:length]

        # First partial cluster read.. now let's keep reading full clusters
        # Data left to be read is bigger than a Cluster?
        if curLength // self.ClusterSize:
            # Yep.. so let's read full clusters
            bufTemp = self.readVCN(vcnToStart, curLength // self.ClusterSize)
            if bufTemp is b'':
                # Something went wrong
                return None
            if len(bufTemp) > curLength:
                # Too much data read, taking something off
                buf = buf + bufTemp[:curLength]
            else:
                buf = buf + bufTemp
            vcnToStart += curLength // self.ClusterSize
            curLength -= len(bufTemp)

        # Is there anything else left to be read in the last cluster?
        if curLength > 0:
            bufTemp = self.readVCN(vcnToStart, 1)
            buf = buf + bufTemp[:curLength]

        if buf == b'':
            return None
        else:
            return buf

class AttributeStandardInfo:
    """Typed view over a parsed $STANDARD_INFORMATION attribute."""
    def __init__(self, attribute):
        logging.debug("Inside AttributeStandardInfo")
        self.Attribute = attribute
        self.StandardInfo = NTFS_STANDARD_INFORMATION(self.Attribute.AttrValue)

    def getFileAttributes(self):
        return self.StandardInfo['FileAttributes']

    def getFileTime(self):
        # Returns a datetime for the last data change, or 0 when unset.
        if self.StandardInfo['LastDataChangeTime'] > 0:
            return datetime.fromtimestamp(getUnixTime(self.StandardInfo['LastDataChangeTime']))
        else:
            return 0

    def dump(self):
        return self.StandardInfo.dump()

class AttributeFileName:
    """Typed view over a parsed $FILE_NAME attribute."""
    def __init__(self, attribute):
        logging.debug("Inside AttributeFileName")
        self.Attribute = attribute
        self.FileNameRecord = NTFS_FILE_NAME_ATTR(self.Attribute.AttrValue)

    def getFileNameType(self):
        return self.FileNameRecord['FileNameType']

    def getFileAttributes(self):
        return self.FileNameRecord['FileAttributes']

    def getFileName(self):
        return self.FileNameRecord['FileName'].decode('utf-16le')

    def getFileSize(self):
        return self.FileNameRecord['DataSize']

    def getFlags(self):
        return self.FileNameRecord['FileAttributes']

    def dump(self):
        return self.FileNameRecord.dump()

class AttributeIndexAllocation:
    """Thin wrapper exposing read access to a $INDEX_ALLOCATION attribute."""
    def __init__(self, attribute):
        logging.debug("Inside AttributeIndexAllocation")
        self.Attribute = attribute

    def dump(self):
        print(self.Attribute.dump())
        for i in self.Attribute.DataRuns:
            print(i.dump())

    def read(self, offset, length):
        return self.Attribute.read(offset, length)
class AttributeIndexRoot:
    """Typed view over a parsed $INDEX_ROOT attribute (directory B-tree root)."""
    def __init__(self, attribute):
        logging.debug("Inside AttributeIndexRoot")
        self.Attribute = attribute
        self.IndexRootRecord = NTFS_INDEX_ROOT(attribute.AttrValue)
        self.IndexEntries = []
        self.parseIndexEntries()

    def parseIndexEntries(self):
        # Entries follow the root record back-to-back; the last one carries
        # the INDEX_ENTRY_END flag.
        data = self.Attribute.AttrValue[len(self.IndexRootRecord):]
        while True:
            ie = IndexEntry(data)
            self.IndexEntries.append(ie)
            if ie.isLastNode():
                break
            data = data[ie.getSize():]

    def dump(self):
        self.IndexRootRecord.dump()
        for i in self.IndexEntries:
            i.dump()

    def getType(self):
        return self.IndexRootRecord['Type']

class IndexEntry:
    """Convenience accessors over one NTFS_INDEX_ENTRY."""
    def __init__(self, entry):
        self.entry = NTFS_INDEX_ENTRY(entry)

    def isSubNode(self):
        # True when the entry points down to a child index block.
        return self.entry['EntryHeader']['Flags'] & INDEX_ENTRY_NODE

    def isLastNode(self):
        return self.entry['EntryHeader']['Flags'] & INDEX_ENTRY_END

    def getVCN(self):
        return struct.unpack('<Q', self.entry['Vcn'])[0]

    def getSize(self):
        return len(self.entry)

    def getKey(self):
        return self.entry['Key']

    def getINodeNumber(self):
        # Low 48 bits are the MFT record number; high 16 are the sequence.
        return self.entry['EntryHeader']['IndexedFile'] & 0x0000FFFFFFFFFFFF

    def dump(self):
        self.entry.dump()

class INODE:
    """In-memory representation of one MFT record and its parsed attributes."""
    def __init__(self, NTFSVolume):
        self.NTFSVolume = NTFSVolume
        # This is the entire file record
        self.INodeNumber = None
        self.Attributes = {}
        self.AttributesRaw = None
        self.AttributesLastPos = None
        # Some interesting Attributes to parse
        self.FileAttributes = 0
        self.LastDataChangeTime = None
        self.FileName = None
        self.FileSize = 0

    def isDirectory(self):
        return self.FileAttributes & FILE_ATTR_I30_INDEX_PRESENT

    def isCompressed(self):
        return self.FileAttributes & FILE_ATTR_COMPRESSED

    def isEncrypted(self):
        return self.FileAttributes & FILE_ATTR_ENCRYPTED

    def isSparse(self):
        return self.FileAttributes & FILE_ATTR_SPARSE_FILE

    def displayName(self):
        """Print one 'ls'-style line (flags, mtime, size, name) for this inode."""
        if self.LastDataChangeTime is not None and self.FileName is not None:
            try:
                print("%s %s %15d %s " %( self.getPrintableAttributes(), self.LastDataChangeTime.isoformat(' '), self.FileSize, self.FileName))
            except Exception as e:
                logging.error('Exception when trying to display inode %d: %s' % (self.INodeNumber,str(e)))

    def getPrintableAttributes(self):
        """Build the 6-char flag mask: d(h)idden S(ystem) C(ompressed) E(ncrypted) s(parse)."""
        mask = ''
        if self.FileAttributes & FILE_ATTR_I30_INDEX_PRESENT:
            mask += 'd'
        else:
            mask += '-'
        if self.FileAttributes & FILE_ATTR_HIDDEN:
            mask += 'h'
        else:
            mask += '-'
        if self.FileAttributes & FILE_ATTR_SYSTEM:
            mask += 'S'
        else:
            mask += '-'
        if self.isCompressed():
            mask += 'C'
        else:
            mask += '-'
        if self.isEncrypted():
            mask += 'E'
        else:
            mask += '-'
        if self.isSparse():
            mask += 's'
        else:
            mask += '-'
        return mask

    def parseAttributes(self):
        """Populate self.Attributes and the summary fields from AttributesRaw."""
        # Parse Standard Info
        attr = self.searchAttribute(STANDARD_INFORMATION, None)
        if attr is not None:
            si = AttributeStandardInfo(attr)
            self.Attributes[STANDARD_INFORMATION] = si
            self.FileAttributes |= si.getFileAttributes()
            self.LastDataChangeTime = si.getFileTime()
            self.Attributes[STANDARD_INFORMATION] = si

        # Parse Filename: skip the short (DOS 8.3) name, keep the first
        # Win32/POSIX one.
        attr = self.searchAttribute(FILE_NAME, None)
        while attr is not None:
            fn = AttributeFileName(attr)
            if fn.getFileNameType() != FILE_NAME_DOS:
                self.FileName = fn.getFileName()
                self.FileSize = fn.getFileSize()
                self.FileAttributes |= fn.getFileAttributes()
                self.Attributes[FILE_NAME] = fn
                break
            attr = self.searchAttribute(FILE_NAME, None, True)

        # Parse Index Allocation ($I30 is the filename index of a directory)
        attr = self.searchAttribute(INDEX_ALLOCATION, u'$I30')
        if attr is not None:
            ia = AttributeIndexAllocation(attr)
            self.Attributes[INDEX_ALLOCATION] = ia

        attr = self.searchAttribute(INDEX_ROOT, u'$I30')
        if attr is not None:
            ir = AttributeIndexRoot(attr)
            self.Attributes[INDEX_ROOT] = ir

    def searchAttribute(self, attributeType, attributeName, findNext = False):
        """Linear scan of the raw attribute stream for (type, name).

        With findNext=True, resumes from just after the previous match.
        Returns an AttributeResident/AttributeNonResident, or None.
        """
        logging.debug("Inside searchAttribute: type: 0x%x, name: %s" % (attributeType, attributeName))
        record = None

        if findNext is True:
            data = self.AttributesLastPos
        else:
            data = self.AttributesRaw

        while True:

            if len(data) <= 8:
                record = None
                break

            record = Attribute(self,data)

            if record.getType() == END:
                record = None
                break

            if record.getTotalSize() == 0:
                # Zero-length record would loop forever; bail out.
                record = None
                break

            if record.getType() == attributeType and record.getName() == attributeName:
                if record.isNonResident() == 1:
                    record = AttributeNonResident(self, data)
                else:
                    record = AttributeResident(self, data)

                # Remember where to resume for findNext=True.
                self.AttributesLastPos = data[record.getTotalSize():]

                break

            data = data[record.getTotalSize():]

        return record

    def PerformFixUp(self, record, buf, numSectors):
        """Apply the NTFS Update Sequence fix-up to a multi-sector record.

        The last two bytes of every sector must equal the Update Sequence
        Number; they are replaced by the original bytes saved in the
        sequence array. Returns the fixed buffer, or None on mismatch.
        """
        # It fixes the sequence WORDS on every sector of a cluster
        # FixUps are used by:
        # FILE Records in the $MFT
        # INDX Records in directories and other indexes
        # RCRD Records in the $LogFile
        # RSTR Records in the $LogFile

        logging.debug("Inside PerformFixUp..." )
        magicNum = struct.unpack('<H',buf[record['USROffset']:][:2])[0]
        sequenceArray = buf[record['USROffset']+2:][:record['USRSize']*2]

        dataList = list(buf)
        index = 0
        for i in range(0,numSectors*2, 2):
            index += self.NTFSVolume.SectorSize-2
            # Let's get the last two bytes of the sector
            lastBytes = struct.unpack('<H', buf[index:][:2])[0]
            # Is it the same as the magicNum?
            if lastBytes != magicNum:
                logging.error("Magic number 0x%x doesn't match with 0x%x" % (magicNum,lastBytes))
                return None
            # Now let's replace the original bytes
            dataList[index] = sequenceArray[i]
            dataList[index+1] = sequenceArray[i+1]
            index += 2

        if PY2:
            return "".join(dataList)
        else:
            return bytes(dataList)

    def parseIndexBlocks(self, vcn):
        """Read and fix up the INDX block at *vcn*; return its IndexEntry list."""
        IndexEntries = []
        #sectors = self.NTFSVolume.IndexBlockSize / self.NTFSVolume.SectorSize
        if INDEX_ALLOCATION in self.Attributes:
            ia = self.Attributes[INDEX_ALLOCATION]
            data = ia.read(vcn*self.NTFSVolume.IndexBlockSize, self.NTFSVolume.IndexBlockSize)
            if data:
                iaRec = NTFS_INDEX_ALLOCATION(data)
                sectorsPerIB = self.NTFSVolume.IndexBlockSize // self.NTFSVolume.SectorSize
                data = self.PerformFixUp(iaRec, data, sectorsPerIB)
                if data is None:
                    return []
                # EntriesOffset is relative to the embedded index header.
                data = data[len(iaRec)-len(NTFS_INDEX_HEADER())+iaRec['Index']['EntriesOffset']:]
                while True:
                    ie = IndexEntry(data)
                    IndexEntries.append(ie)
                    if ie.isLastNode():
                        break
                    data = data[ie.getSize():]
        return IndexEntries

    def walkSubNodes(self, vcn):
        """Recursively collect NTFS_FILE_NAME_ATTR records under index block *vcn*.

        Entries pointing at the 16 reserved system MFT records are skipped.
        """
        logging.debug("Inside walkSubNodes: vcn %s" % vcn)
        entries = self.parseIndexBlocks(vcn)
        files = []
        for entry in entries:
            if entry.isSubNode():
                files += self.walkSubNodes(entry.getVCN())
            else:
                if len(entry.getKey()) > 0 and entry.getINodeNumber() > 16:
                    fn = NTFS_FILE_NAME_ATTR(entry.getKey())
                    if fn['FileNameType'] != FILE_NAME_DOS:
                        files.append(fn)
        return files

    def walk(self):
        """List this directory: returns NTFS_FILE_NAME_ATTR records, or None
        when the inode has no $INDEX_ROOT (i.e. is not a directory)."""
        logging.debug("Inside Walk... ")
        files = []
        if INDEX_ROOT in self.Attributes:
            ir = self.Attributes[INDEX_ROOT]

            if ir.getType() & FILE_NAME:
                for ie in ir.IndexEntries:
                    if ie.isSubNode():
                        files += self.walkSubNodes(ie.getVCN())
            return files
        else:
            return None

    def findFirstSubNode(self, vcn, toSearch):
        """B-tree descent below index block *vcn* looking for name *toSearch*
        (already upper-cased). Returns the matching IndexEntry or None."""
        def getFileName(entry):
            if len(entry.getKey()) > 0 and entry.getINodeNumber() > 16:
                fn = NTFS_FILE_NAME_ATTR(entry.getKey())
                if fn['FileNameType'] != FILE_NAME_DOS:
                    return fn['FileName'].decode('utf-16le').upper()
            return None

        entries = self.parseIndexBlocks(vcn)
        for ie in entries:
            name = getFileName(ie)
            if name is not None:
                if name == toSearch:
                    # Found!
                    return ie
                if toSearch < name:
                    # Keys are sorted; target can only be in the left sub-tree.
                    if ie.isSubNode():
                        res = self.findFirstSubNode(ie.getVCN(), toSearch)
                        if res is not None:
                            return res
                    else:
                        # Bye bye.. not found
                        return None
            else:
                if ie.isSubNode():
                    res = self.findFirstSubNode(ie.getVCN(), toSearch)
                    if res is not None:
                        return res


    def findFirst(self, fileName):
        # Searches for a file and returns an Index Entry. None if not found

        def getFileName(entry):
            if len(entry.getKey()) > 0 and entry.getINodeNumber() > 16:
                fn = NTFS_FILE_NAME_ATTR(entry.getKey())
                if fn['FileNameType'] != FILE_NAME_DOS:
                    return fn['FileName'].decode('utf-16le').upper()
            return None

        # NTFS name comparison is case-insensitive: search upper-cased.
        toSearch = text_type(fileName.upper())

        if INDEX_ROOT in self.Attributes:
            ir = self.Attributes[INDEX_ROOT]
            # NOTE(review): 'or 1==1' makes this condition always true.
            if ir.getType() & FILE_NAME or 1==1:
                for ie in ir.IndexEntries:
                    name = getFileName(ie)
                    if name is not None:
                        if name == toSearch:
                            # Found!
                            return ie
                        if toSearch < name:
                            if ie.isSubNode():
                                res = self.findFirstSubNode(ie.getVCN(), toSearch)
                                if res is not None:
                                    return res
                            else:
                                # Bye bye.. not found
                                return None
                        else:
                            if ie.isSubNode():
                                res = self.findFirstSubNode(ie.getVCN(), toSearch)
                                if res is not None:
                                    return res

    def getStream(self, name):
        """Return the $DATA attribute with the given stream *name* (None for
        the default unnamed stream)."""
        return self.searchAttribute( DATA, name, findNext = False)
class NTFS:
    """Read-only view of an NTFS volume opened from a raw device/file path."""
    def __init__(self, volumeName):
        self.__volumeName = volumeName
        self.__bootSector = None
        self.__MFTStart = None
        self.volumeFD = None
        self.BPB = None
        self.ExtendedBPB = None
        self.RecordSize = None
        self.IndexBlockSize = None
        self.SectorSize = None
        self.MFTINode = None
        self.mountVolume()

    def mountVolume(self):
        """Open the volume, parse the boot sector and locate the $MFT."""
        logging.debug("Mounting volume...")
        self.volumeFD = open(self.__volumeName,"rb")
        self.readBootSector()
        self.MFTINode = self.getINode(FILE_MFT)
        # Check whether MFT is fragmented
        attr = self.MFTINode.searchAttribute(DATA, None)
        if attr is None:
            # It's not
            del self.MFTINode
            self.MFTINode = None

    def readBootSector(self):
        """Parse sector 0: BPB/extended BPB, record and index block sizes."""
        logging.debug("Reading Boot Sector for %s" % self.__volumeName)

        self.volumeFD.seek(0,0)
        data = self.volumeFD.read(512)
        while len(data) < 512:
            data += self.volumeFD.read(512)

        self.__bootSector = NTFS_BOOT_SECTOR(data)
        self.BPB = NTFS_BPB(self.__bootSector['BPB'])
        self.ExtendedBPB = NTFS_EXTENDED_BPB(self.__bootSector['ExtendedBPB'])
        self.SectorSize = self.BPB['BytesPerSector']
        self.__MFTStart = self.BPB['BytesPerSector'] * self.BPB['SectorsPerCluster'] * self.ExtendedBPB['MFTClusterNumber']
        # Negative ClusterPerFileRecord/ClusterPerIndexBuffer mean the size
        # is 2**(-value) bytes rather than a cluster count.
        if self.ExtendedBPB['ClusterPerFileRecord'] > 0:
            self.RecordSize = self.BPB['BytesPerSector'] * self.BPB['SectorsPerCluster'] * self.ExtendedBPB['ClusterPerFileRecord']
        else:
            self.RecordSize = 1 << (-self.ExtendedBPB['ClusterPerFileRecord'])
        if self.ExtendedBPB['ClusterPerIndexBuffer'] > 0:
            self.IndexBlockSize = self.BPB['BytesPerSector'] * self.BPB['SectorsPerCluster'] * self.ExtendedBPB['ClusterPerIndexBuffer']
        else:
            self.IndexBlockSize = 1 << (-self.ExtendedBPB['ClusterPerIndexBuffer'])

        logging.debug("MFT should start at position %d" % self.__MFTStart)

    def getINode(self, iNodeNum):
        """Fetch MFT record *iNodeNum*, apply the fix-up and parse attributes."""
        logging.debug("Trying to fetch inode %d" % iNodeNum)

        newINode = INODE(self)

        recordLen = self.RecordSize

        # Let's calculate where in disk this iNode should be
        if self.MFTINode and iNodeNum > FIXED_MFTS:
            # Fragmented $MFT: read the record through the MFT's own data runs.
            attr = self.MFTINode.searchAttribute(DATA,None)
            record = attr.read(iNodeNum*self.RecordSize, self.RecordSize)
        else:
            diskPosition = self.__MFTStart + iNodeNum * self.RecordSize
            self.volumeFD.seek(diskPosition,0)
            record = self.volumeFD.read(recordLen)
            while len(record) < recordLen:
                record += self.volumeFD.read(recordLen-len(record))

        mftRecord = NTFS_MFT_RECORD(record)

        record = newINode.PerformFixUp(mftRecord, record, self.RecordSize//self.SectorSize)
        newINode.INodeNumber = iNodeNum
        # Negative index trick: equivalent to record[AttributesOffset:] since
        # len(record) == recordLen.
        newINode.AttributesRaw = record[mftRecord['AttributesOffset']-recordLen:]
        newINode.parseAttributes()

        return newINode

class MiniShell(cmd.Cmd):
    """Interactive cmd-based shell for browsing the NTFS volume read-only."""
    def __init__(self, volume):
        cmd.Cmd.__init__(self)
        self.volumePath = volume
        self.volume = NTFS(volume)
        # Inode 5 is the root directory (FILE_Root).
        self.rootINode = self.volume.getINode(5)
        self.prompt = '\\>'
        self.intro = 'Type help for list of commands'
        self.currentINode = self.rootINode
        self.completion = []
        self.pwd = '\\'
        # Prime the tab-completion cache without printing a listing.
        self.do_ls('',False)
        self.last_output = ''

    def emptyline(self):
        # Do nothing on an empty input line (default repeats last command).
        pass

    def onecmd(self,s):
        """Run one command, catching and logging any exception so the shell
        survives parse errors on damaged volumes."""
        retVal = False
        try:
            retVal = cmd.Cmd.onecmd(self,s)
        except Exception as e:
            logging.debug('Exception:', exc_info=True)
            logging.error(str(e))

        return retVal
    def do_exit(self,line):
        # Returning True terminates the cmd loop.
        return True

    def do_shell(self, line):
        """Run *line* in the local shell and print its output.

        NOTE(review): passes user input straight to os.popen — intentional
        escape hatch for an interactive tool, but it is a shell injection
        by design.
        """
        output = os.popen(line).read()
        print(output)
        self.last_output = output

    def do_help(self,line):
        print("""
 cd {path} - changes the current directory to {path}
 pwd - shows current remote directory
 ls  - lists all the files in the current directory
 lcd - change local directory
 get {filename} - downloads the filename from the current path
 cat {filename} - prints the contents of filename
 hexdump {filename} - hexdumps the contents of filename
 exit - terminates the server process (and this session)

""")

    def do_lcd(self,line):
        """Change (or print) the local working directory."""
        if line == '':
            print(os.getcwd())
        else:
            os.chdir(line)
            print(os.getcwd())

    def do_cd(self, line):
        """Change the current NTFS directory, resolving the path relative to
        pwd and refreshing the completion cache."""
        p = line.replace('/','\\')
        oldpwd = self.pwd
        newPath = ntpath.normpath(ntpath.join(self.pwd,p))
        if newPath == self.pwd:
            # Nothing changed
            return
        common = ntpath.commonprefix([newPath,oldpwd])

        if common == oldpwd:
            # Descending below the current dir: resolve relative to it.
            res = self.findPathName(ntpath.normpath(p))
        else:
            res = self.findPathName(newPath)

        if res is None:
            logging.error("Directory not found")
            self.pwd = oldpwd
            return
        if res.isDirectory() == 0:
            logging.error("Not a directory!")
            self.pwd = oldpwd
            return
        else:
            self.currentINode = res
            self.do_ls('', False)
            self.pwd = ntpath.join(self.pwd,p)
            self.pwd = ntpath.normpath(self.pwd)
            self.prompt = self.pwd + '>'

    def findPathName(self, pathName):
        """Walk *pathName* component by component from the current (or root)
        inode; return the final INODE or None when a component is missing."""
        if pathName == '\\':
            return self.rootINode
        tmpINode = self.currentINode
        parts = pathName.split('\\')
        for part in parts:
            if part == '':
                # Leading backslash: restart from the root.
                tmpINode = self.rootINode
            else:
                res = tmpINode.findFirst(part)
                if res is None:
                    return res
                else:
                    tmpINode = self.volume.getINode(res.getINodeNumber())

        return tmpINode

    def do_pwd(self,line):
        print(self.pwd)

    def do_ls(self, line, display = True):
        """List the current directory; always refreshes the completion cache,
        printing only when *display* is True."""
        entries = self.currentINode.walk()
        self.completion = []
        for entry in entries:
            inode = INODE(self.volume)
            inode.FileAttributes = entry['FileAttributes']
            inode.FileSize = entry['DataSize']
            inode.LastDataChangeTime = datetime.fromtimestamp(getUnixTime(entry['LastDataChangeTime']))
            inode.FileName = entry['FileName'].decode('utf-16le')
            if display is True:
                inode.displayName()
            self.completion.append((inode.FileName,inode.isDirectory()))

    def complete_cd(self, text, line, begidx, endidx):
        # cd completes directories only.
        return self.complete_get(text, line, begidx, endidx, include = 2)

    def complete_cat(self,text,line,begidx,endidx):
        return self.complete_get(text, line, begidx, endidx)

    def complete_hexdump(self,text,line,begidx,endidx):
        return self.complete_get(text, line, begidx, endidx)

    def complete_get(self, text, line, begidx, endidx, include = 1):
        # include means
        # 1 just files
        # 2 just directories
        items = []
        if include == 1:
            mask = 0
        else:
            mask = FILE_ATTR_I30_INDEX_PRESENT
        for i in self.completion:
            if i[1] == mask:
                items.append(i[0])
        if text:
            return [
                item for item in items
                if item.upper().startswith(text.upper())
            ]
        else:
            return items

    def do_hexdump(self,line):
        return self.do_cat(line,command = hexdump)

    def do_cat(self, line, command = sys.stdout.write):
        """Stream a file's default $DATA stream through *command* in chunks."""
        pathName = line.replace('/','\\')
        pathName = ntpath.normpath(ntpath.join(self.pwd,pathName))
        res = self.findPathName(pathName)
        if res is None:
            logging.error("Not found!")
            return
        if res.isDirectory() > 0:
            logging.error("It's a directory!")
            return
        if res.isCompressed() or res.isEncrypted() or res.isSparse():
            logging.error('Cannot handle compressed/encrypted/sparse files! :(')
            return
        stream = res.getStream(None)
        chunks = 4096*10
        written = 0
        for i in range(stream.getDataSize()//chunks):
            buf = stream.read(i*chunks, chunks)
            written += len(buf)
            command(buf)
        if stream.getDataSize() % chunks:
            buf = stream.read(written, stream.getDataSize() % chunks)
            # NOTE(review): only the tail chunk is decoded; when *command* is
            # a binary file's write (do_get) this passes str, not bytes —
            # likely a Py3 TypeError for files smaller than one chunk. Confirm.
            command(buf.decode('latin-1'))
        logging.info("%d bytes read" % stream.getDataSize())

    def do_get(self, line):
        """Download a remote file into the local working directory."""
        pathName = line.replace('/','\\')
        pathName = ntpath.normpath(ntpath.join(self.pwd,pathName))
        fh = open(ntpath.basename(pathName),"wb")
        self.do_cat(line, command = fh.write)
        fh.close()

def main():
    """Entry point: parse arguments, then either extract one path or start
    the interactive shell."""
    print(version.BANNER)
    # Init the example's logger theme
    logger.init()
    parser = argparse.ArgumentParser(add_help = True, description = "NTFS explorer (read-only)")
    parser.add_argument('volume', action='store', help='NTFS volume to open (e.g. \\\\.\\C: or /dev/disk1s1)')
    parser.add_argument('-extract', action='store', help='extracts pathname (e.g. \\windows\\system32\\config\\sam)')
    parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')

    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()

    if options.debug is True:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)

    shell = MiniShell(options.volume)
    if options.extract is not None:
        shell.onecmd("get %s"% options.extract)
    else:
        shell.cmdloop()

if __name__ == '__main__':
    main()
    # NOTE(review): always exits with status 1, even on success.
    sys.exit(1)