1 Star 1 Fork 3

张小农 / 某扫描器核心反编译

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
pefile.py 186.70 KB
一键复制 编辑 原始数据 按行查看 历史
张小农 提交于 2019-03-21 13:47 . first code
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946
# uncompyle6 version 3.2.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.8 |Anaconda custom (64-bit)| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]
# Embedded file name: site-packages\pefile.py
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names as
attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against many edge cases such as corrupted and malformed
PEs as well as malware, which often attempts to abuse the format way beyond its
standard use. To the best of my knowledge most of the abuse is handled
gracefully.
Copyright (c) 2005-2017 Ero Carrera <ero.carrera@gmail.com>
All rights reserved.
"""
from __future__ import division
from __future__ import print_function
from builtins import bytes
from builtins import chr
from builtins import object
from builtins import range
from builtins import str
from builtins import zip
__author__ = 'Ero Carrera'
__version__ = '2017.11.5'
__contact__ = 'ero.carrera@gmail.com'
import os, struct, sys, codecs, time, math, re, string, array, mmap, ordlookup
from collections import Counter
from hashlib import sha1
from hashlib import sha256
from hashlib import sha512
from hashlib import md5
# True when running under any Python 3.x interpreter.
PY3 = sys.version_info > (3, )
if PY3:
    # Python 3 removed the 'long' type; alias it so code below can keep
    # using the py2 name uniformly.
    long = int
def count_zeroes(data):
    """Return the number of NUL elements in *data*.

    Works for both text ('\\x00' characters) and bytes-like objects
    (zero bytes): counting the text NUL raises TypeError on py3 bytes,
    in which case we fall back to counting the integer 0.
    """
    try:
        return data.count('\x00')
    except TypeError:
        return data.count(0)
# Module-wide default for the 'fast_load' option.
fast_load = False
# Parsing limit constants (names suggest caps on parsed string/name sizes;
# applied elsewhere in the module).
MAX_STRING_LENGTH = 1048576  # 2**20
MAX_IMPORT_NAME_LENGTH = 512
MAX_DLL_LENGTH = 512
MAX_SYMBOL_NAME_LENGTH = 512
# File-signature magic values (little-endian WORD equivalents in hex/ASCII).
IMAGE_DOS_SIGNATURE = 23117  # 0x5A4D, 'MZ'
IMAGE_DOSZM_SIGNATURE = 19802  # 0x4D5A, 'ZM'
IMAGE_NE_SIGNATURE = 17742  # 0x454E, 'NE'
IMAGE_LE_SIGNATURE = 17740  # 0x454C, 'LE'
IMAGE_LX_SIGNATURE = 22604  # 0x584C, 'LX'
IMAGE_TE_SIGNATURE = 23126  # 0x5A56, 'VZ' (Terse Executable)
IMAGE_NT_SIGNATURE = 17744  # 0x4550, 'PE'
IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16
IMAGE_ORDINAL_FLAG = 2147483648  # 0x80000000 (PE32 import-by-ordinal bit)
IMAGE_ORDINAL_FLAG64 = 9223372036854775808  # 0x8000000000000000 (PE32+)
OPTIONAL_HEADER_MAGIC_PE = 267  # 0x10B (PE32)
OPTIONAL_HEADER_MAGIC_PE_PLUS = 523  # 0x20B (PE32+)
# (name, index) pairs for the optional header's DataDirectory slots.
directory_entry_types = [
    ('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
    ('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
    ('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
    ('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
    ('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
    ('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
    ('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
    ('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7),
    ('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
    ('IMAGE_DIRECTORY_ENTRY_TLS', 9),
    ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
    ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
    ('IMAGE_DIRECTORY_ENTRY_IAT', 12),
    ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
    ('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR', 14),
    ('IMAGE_DIRECTORY_ENTRY_RESERVED', 15)]
# Two-way lookup: both name -> index and index -> name.
DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types] + directory_entry_types)
# IMAGE_FILE_HEADER.Characteristics bit flags.
image_characteristics = [
    ('IMAGE_FILE_RELOCS_STRIPPED', 1),
    ('IMAGE_FILE_EXECUTABLE_IMAGE', 2),
    ('IMAGE_FILE_LINE_NUMS_STRIPPED', 4),
    ('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 8),
    ('IMAGE_FILE_AGGRESIVE_WS_TRIM', 16),
    ('IMAGE_FILE_LARGE_ADDRESS_AWARE', 32),
    ('IMAGE_FILE_16BIT_MACHINE', 64),
    ('IMAGE_FILE_BYTES_REVERSED_LO', 128),
    ('IMAGE_FILE_32BIT_MACHINE', 256),
    ('IMAGE_FILE_DEBUG_STRIPPED', 512),
    ('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 1024),
    ('IMAGE_FILE_NET_RUN_FROM_SWAP', 2048),
    ('IMAGE_FILE_SYSTEM', 4096),
    ('IMAGE_FILE_DLL', 8192),
    ('IMAGE_FILE_UP_SYSTEM_ONLY', 16384),
    ('IMAGE_FILE_BYTES_REVERSED_HI', 32768)]
# Two-way lookup: both name -> value and value -> name.
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in image_characteristics] + image_characteristics)
# Section header Characteristics flags. Several names are aliases sharing
# one numeric value (e.g. MEM_PROTECTED / NO_DEFER_SPEC_EXC), so the
# reversed (value -> name) entries keep only the later of each pair.
section_characteristics = [
    ('IMAGE_SCN_TYPE_REG', 0),
    ('IMAGE_SCN_TYPE_DSECT', 1),
    ('IMAGE_SCN_TYPE_NOLOAD', 2),
    ('IMAGE_SCN_TYPE_GROUP', 4),
    ('IMAGE_SCN_TYPE_NO_PAD', 8),
    ('IMAGE_SCN_TYPE_COPY', 16),
    ('IMAGE_SCN_CNT_CODE', 32),
    ('IMAGE_SCN_CNT_INITIALIZED_DATA', 64),
    ('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 128),
    ('IMAGE_SCN_LNK_OTHER', 256),
    ('IMAGE_SCN_LNK_INFO', 512),
    ('IMAGE_SCN_LNK_OVER', 1024),
    ('IMAGE_SCN_LNK_REMOVE', 2048),
    ('IMAGE_SCN_LNK_COMDAT', 4096),
    ('IMAGE_SCN_MEM_PROTECTED', 16384),
    ('IMAGE_SCN_NO_DEFER_SPEC_EXC', 16384),
    ('IMAGE_SCN_GPREL', 32768),
    ('IMAGE_SCN_MEM_FARDATA', 32768),
    ('IMAGE_SCN_MEM_SYSHEAP', 65536),
    ('IMAGE_SCN_MEM_PURGEABLE', 131072),
    ('IMAGE_SCN_MEM_16BIT', 131072),
    ('IMAGE_SCN_MEM_LOCKED', 262144),
    ('IMAGE_SCN_MEM_PRELOAD', 524288),
    ('IMAGE_SCN_ALIGN_1BYTES', 1048576),
    ('IMAGE_SCN_ALIGN_2BYTES', 2097152),
    ('IMAGE_SCN_ALIGN_4BYTES', 3145728),
    ('IMAGE_SCN_ALIGN_8BYTES', 4194304),
    ('IMAGE_SCN_ALIGN_16BYTES', 5242880),
    ('IMAGE_SCN_ALIGN_32BYTES', 6291456),
    ('IMAGE_SCN_ALIGN_64BYTES', 7340032),
    ('IMAGE_SCN_ALIGN_128BYTES', 8388608),
    ('IMAGE_SCN_ALIGN_256BYTES', 9437184),
    ('IMAGE_SCN_ALIGN_512BYTES', 10485760),
    ('IMAGE_SCN_ALIGN_1024BYTES', 11534336),
    ('IMAGE_SCN_ALIGN_2048BYTES', 12582912),
    ('IMAGE_SCN_ALIGN_4096BYTES', 13631488),
    ('IMAGE_SCN_ALIGN_8192BYTES', 14680064),
    ('IMAGE_SCN_ALIGN_MASK', 15728640),
    ('IMAGE_SCN_LNK_NRELOC_OVFL', 16777216),
    ('IMAGE_SCN_MEM_DISCARDABLE', 33554432),
    ('IMAGE_SCN_MEM_NOT_CACHED', 67108864),
    ('IMAGE_SCN_MEM_NOT_PAGED', 134217728),
    ('IMAGE_SCN_MEM_SHARED', 268435456),
    ('IMAGE_SCN_MEM_EXECUTE', 536870912),
    ('IMAGE_SCN_MEM_READ', 1073741824),
    ('IMAGE_SCN_MEM_WRITE', 2147483648)]
# Two-way lookup: both name -> value and value -> name.
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in section_characteristics] + section_characteristics)
# IMAGE_DEBUG_DIRECTORY.Type values.
debug_types = [
    ('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
    ('IMAGE_DEBUG_TYPE_COFF', 1),
    ('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
    ('IMAGE_DEBUG_TYPE_FPO', 3),
    ('IMAGE_DEBUG_TYPE_MISC', 4),
    ('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
    ('IMAGE_DEBUG_TYPE_FIXUP', 6),
    ('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
    ('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
    ('IMAGE_DEBUG_TYPE_BORLAND', 9),
    ('IMAGE_DEBUG_TYPE_RESERVED10', 10),
    ('IMAGE_DEBUG_TYPE_CLSID', 11),
    ('IMAGE_DEBUG_TYPE_VC_FEATURE', 12),
    ('IMAGE_DEBUG_TYPE_POGO', 13),
    ('IMAGE_DEBUG_TYPE_ILTCG', 14),
    ('IMAGE_DEBUG_TYPE_MPX', 15)]
# Two-way lookup: both name -> value and value -> name.
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types] + debug_types)
# IMAGE_OPTIONAL_HEADER.Subsystem values.
subsystem_types = [
    ('IMAGE_SUBSYSTEM_UNKNOWN', 0),
    ('IMAGE_SUBSYSTEM_NATIVE', 1),
    ('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
    ('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
    ('IMAGE_SUBSYSTEM_OS2_CUI', 5),
    ('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
    ('IMAGE_SUBSYSTEM_NATIVE_WINDOWS', 8),
    ('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
    ('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
    ('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
    ('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
    ('IMAGE_SUBSYSTEM_EFI_ROM', 13),
    ('IMAGE_SUBSYSTEM_XBOX', 14),
    ('IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION', 16)]
# Two-way lookup: both name -> value and value -> name.
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types] + subsystem_types)
# IMAGE_FILE_HEADER.Machine values. ALPHA64/AXP64 share 644, so the
# value -> name direction resolves to the later entry.
machine_types = [
    ('IMAGE_FILE_MACHINE_UNKNOWN', 0),
    ('IMAGE_FILE_MACHINE_I386', 332),
    ('IMAGE_FILE_MACHINE_R3000', 354),
    ('IMAGE_FILE_MACHINE_R4000', 358),
    ('IMAGE_FILE_MACHINE_R10000', 360),
    ('IMAGE_FILE_MACHINE_WCEMIPSV2', 361),
    ('IMAGE_FILE_MACHINE_ALPHA', 388),
    ('IMAGE_FILE_MACHINE_SH3', 418),
    ('IMAGE_FILE_MACHINE_SH3DSP', 419),
    ('IMAGE_FILE_MACHINE_SH3E', 420),
    ('IMAGE_FILE_MACHINE_SH4', 422),
    ('IMAGE_FILE_MACHINE_SH5', 424),
    ('IMAGE_FILE_MACHINE_ARM', 448),
    ('IMAGE_FILE_MACHINE_THUMB', 450),
    ('IMAGE_FILE_MACHINE_ARMNT', 452),
    ('IMAGE_FILE_MACHINE_AM33', 467),
    ('IMAGE_FILE_MACHINE_POWERPC', 496),
    ('IMAGE_FILE_MACHINE_POWERPCFP', 497),
    ('IMAGE_FILE_MACHINE_IA64', 512),
    ('IMAGE_FILE_MACHINE_MIPS16', 614),
    ('IMAGE_FILE_MACHINE_ALPHA64', 644),
    ('IMAGE_FILE_MACHINE_AXP64', 644),
    ('IMAGE_FILE_MACHINE_MIPSFPU', 870),
    ('IMAGE_FILE_MACHINE_MIPSFPU16', 1126),
    ('IMAGE_FILE_MACHINE_TRICORE', 1312),
    ('IMAGE_FILE_MACHINE_CEF', 3311),
    ('IMAGE_FILE_MACHINE_EBC', 3772),
    ('IMAGE_FILE_MACHINE_AMD64', 34404),
    ('IMAGE_FILE_MACHINE_M32R', 36929),
    ('IMAGE_FILE_MACHINE_CEE', 49390)]
# Two-way lookup: both name -> value and value -> name.
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types] + machine_types)
# Base relocation type constants. MIPS_JMPADDR16 and IA64_IMM64 both
# use 9, so value 9 maps back to the later name.
relocation_types = [
    ('IMAGE_REL_BASED_ABSOLUTE', 0),
    ('IMAGE_REL_BASED_HIGH', 1),
    ('IMAGE_REL_BASED_LOW', 2),
    ('IMAGE_REL_BASED_HIGHLOW', 3),
    ('IMAGE_REL_BASED_HIGHADJ', 4),
    ('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
    ('IMAGE_REL_BASED_SECTION', 6),
    ('IMAGE_REL_BASED_REL', 7),
    ('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
    ('IMAGE_REL_BASED_IA64_IMM64', 9),
    ('IMAGE_REL_BASED_DIR64', 10),
    ('IMAGE_REL_BASED_HIGH3ADJ', 11)]
# Two-way lookup: both name -> value and value -> name.
RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types] + relocation_types)
# IMAGE_OPTIONAL_HEADER.DllCharacteristics bit flags.
dll_characteristics = [
    ('IMAGE_LIBRARY_PROCESS_INIT', 1),
    ('IMAGE_LIBRARY_PROCESS_TERM', 2),
    ('IMAGE_LIBRARY_THREAD_INIT', 4),
    ('IMAGE_LIBRARY_THREAD_TERM', 8),
    ('IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA', 32),
    ('IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE', 64),
    ('IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY', 128),
    ('IMAGE_DLLCHARACTERISTICS_NX_COMPAT', 256),
    ('IMAGE_DLLCHARACTERISTICS_NO_ISOLATION', 512),
    ('IMAGE_DLLCHARACTERISTICS_NO_SEH', 1024),
    ('IMAGE_DLLCHARACTERISTICS_NO_BIND', 2048),
    ('IMAGE_DLLCHARACTERISTICS_APPCONTAINER', 4096),
    ('IMAGE_DLLCHARACTERISTICS_WDM_DRIVER', 8192),
    ('IMAGE_DLLCHARACTERISTICS_GUARD_CF', 16384),
    ('IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE', 32768)]
# Two-way lookup: both name -> value and value -> name.
DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics] + dll_characteristics)
# Standard resource type identifiers (RT_*).
resource_type = [
    ('RT_CURSOR', 1),
    ('RT_BITMAP', 2),
    ('RT_ICON', 3),
    ('RT_MENU', 4),
    ('RT_DIALOG', 5),
    ('RT_STRING', 6),
    ('RT_FONTDIR', 7),
    ('RT_FONT', 8),
    ('RT_ACCELERATOR', 9),
    ('RT_RCDATA', 10),
    ('RT_MESSAGETABLE', 11),
    ('RT_GROUP_CURSOR', 12),
    ('RT_GROUP_ICON', 14),
    ('RT_VERSION', 16),
    ('RT_DLGINCLUDE', 17),
    ('RT_PLUGPLAY', 19),
    ('RT_VXD', 20),
    ('RT_ANICURSOR', 21),
    ('RT_ANIICON', 22),
    ('RT_HTML', 23),
    ('RT_MANIFEST', 24)]
# Two-way lookup: both name -> value and value -> name.
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type] + resource_type)
# Windows primary language identifiers (LANG_*). Note LANG_CROATIAN and
# LANG_SERBIAN share 26; the value -> name direction keeps the later one.
lang = [
    ('LANG_NEUTRAL', 0),
    ('LANG_INVARIANT', 127),
    ('LANG_AFRIKAANS', 54),
    ('LANG_ALBANIAN', 28),
    ('LANG_ARABIC', 1),
    ('LANG_ARMENIAN', 43),
    ('LANG_ASSAMESE', 77),
    ('LANG_AZERI', 44),
    ('LANG_BASQUE', 45),
    ('LANG_BELARUSIAN', 35),
    ('LANG_BENGALI', 69),
    ('LANG_BULGARIAN', 2),
    ('LANG_CATALAN', 3),
    ('LANG_CHINESE', 4),
    ('LANG_CROATIAN', 26),
    ('LANG_CZECH', 5),
    ('LANG_DANISH', 6),
    ('LANG_DIVEHI', 101),
    ('LANG_DUTCH', 19),
    ('LANG_ENGLISH', 9),
    ('LANG_ESTONIAN', 37),
    ('LANG_FAEROESE', 56),
    ('LANG_FARSI', 41),
    ('LANG_FINNISH', 11),
    ('LANG_FRENCH', 12),
    ('LANG_GALICIAN', 86),
    ('LANG_GEORGIAN', 55),
    ('LANG_GERMAN', 7),
    ('LANG_GREEK', 8),
    ('LANG_GUJARATI', 71),
    ('LANG_HEBREW', 13),
    ('LANG_HINDI', 57),
    ('LANG_HUNGARIAN', 14),
    ('LANG_ICELANDIC', 15),
    ('LANG_INDONESIAN', 33),
    ('LANG_ITALIAN', 16),
    ('LANG_JAPANESE', 17),
    ('LANG_KANNADA', 75),
    ('LANG_KASHMIRI', 96),
    ('LANG_KAZAK', 63),
    ('LANG_KONKANI', 87),
    ('LANG_KOREAN', 18),
    ('LANG_KYRGYZ', 64),
    ('LANG_LATVIAN', 38),
    ('LANG_LITHUANIAN', 39),
    ('LANG_MACEDONIAN', 47),
    ('LANG_MALAY', 62),
    ('LANG_MALAYALAM', 76),
    ('LANG_MANIPURI', 88),
    ('LANG_MARATHI', 78),
    ('LANG_MONGOLIAN', 80),
    ('LANG_NEPALI', 97),
    ('LANG_NORWEGIAN', 20),
    ('LANG_ORIYA', 72),
    ('LANG_POLISH', 21),
    ('LANG_PORTUGUESE', 22),
    ('LANG_PUNJABI', 70),
    ('LANG_ROMANIAN', 24),
    ('LANG_RUSSIAN', 25),
    ('LANG_SANSKRIT', 79),
    ('LANG_SERBIAN', 26),
    ('LANG_SINDHI', 89),
    ('LANG_SLOVAK', 27),
    ('LANG_SLOVENIAN', 36),
    ('LANG_SPANISH', 10),
    ('LANG_SWAHILI', 65),
    ('LANG_SWEDISH', 29),
    ('LANG_SYRIAC', 90),
    ('LANG_TAMIL', 73),
    ('LANG_TATAR', 68),
    ('LANG_TELUGU', 74),
    ('LANG_THAI', 30),
    ('LANG_TURKISH', 31),
    ('LANG_UKRAINIAN', 34),
    ('LANG_URDU', 32),
    ('LANG_UZBEK', 67),
    ('LANG_VIETNAMESE', 42),
    ('LANG_GAELIC', 60),
    ('LANG_MALTESE', 58),
    ('LANG_MAORI', 40),
    ('LANG_RHAETO_ROMANCE', 23),
    ('LANG_SAAMI', 59),
    ('LANG_SORBIAN', 46),
    ('LANG_SUTU', 48),
    ('LANG_TSONGA', 49),
    ('LANG_TSWANA', 50),
    ('LANG_VENDA', 51),
    ('LANG_XHOSA', 52),
    ('LANG_ZULU', 53),
    ('LANG_ESPERANTO', 143),
    ('LANG_WALON', 144),
    ('LANG_CORNISH', 145),
    ('LANG_WELSH', 146),
    ('LANG_BRETON', 147)]
# Two-way lookup: both name -> value and value -> name.
LANG = dict(lang + [(e[1], e[0]) for e in lang])
# Windows sublanguage identifiers (SUBLANG_*). Numeric values repeat
# across primary languages, so a simple reversed dict would be lossy;
# the SUBLANG mapping built below keeps value -> [names] lists instead.
sublang = [
    ('SUBLANG_NEUTRAL', 0),
    ('SUBLANG_DEFAULT', 1),
    ('SUBLANG_SYS_DEFAULT', 2),
    ('SUBLANG_ARABIC_SAUDI_ARABIA', 1),
    ('SUBLANG_ARABIC_IRAQ', 2),
    ('SUBLANG_ARABIC_EGYPT', 3),
    ('SUBLANG_ARABIC_LIBYA', 4),
    ('SUBLANG_ARABIC_ALGERIA', 5),
    ('SUBLANG_ARABIC_MOROCCO', 6),
    ('SUBLANG_ARABIC_TUNISIA', 7),
    ('SUBLANG_ARABIC_OMAN', 8),
    ('SUBLANG_ARABIC_YEMEN', 9),
    ('SUBLANG_ARABIC_SYRIA', 10),
    ('SUBLANG_ARABIC_JORDAN', 11),
    ('SUBLANG_ARABIC_LEBANON', 12),
    ('SUBLANG_ARABIC_KUWAIT', 13),
    ('SUBLANG_ARABIC_UAE', 14),
    ('SUBLANG_ARABIC_BAHRAIN', 15),
    ('SUBLANG_ARABIC_QATAR', 16),
    ('SUBLANG_AZERI_LATIN', 1),
    ('SUBLANG_AZERI_CYRILLIC', 2),
    ('SUBLANG_CHINESE_TRADITIONAL', 1),
    ('SUBLANG_CHINESE_SIMPLIFIED', 2),
    ('SUBLANG_CHINESE_HONGKONG', 3),
    ('SUBLANG_CHINESE_SINGAPORE', 4),
    ('SUBLANG_CHINESE_MACAU', 5),
    ('SUBLANG_DUTCH', 1),
    ('SUBLANG_DUTCH_BELGIAN', 2),
    ('SUBLANG_ENGLISH_US', 1),
    ('SUBLANG_ENGLISH_UK', 2),
    ('SUBLANG_ENGLISH_AUS', 3),
    ('SUBLANG_ENGLISH_CAN', 4),
    ('SUBLANG_ENGLISH_NZ', 5),
    ('SUBLANG_ENGLISH_EIRE', 6),
    ('SUBLANG_ENGLISH_SOUTH_AFRICA', 7),
    ('SUBLANG_ENGLISH_JAMAICA', 8),
    ('SUBLANG_ENGLISH_CARIBBEAN', 9),
    ('SUBLANG_ENGLISH_BELIZE', 10),
    ('SUBLANG_ENGLISH_TRINIDAD', 11),
    ('SUBLANG_ENGLISH_ZIMBABWE', 12),
    ('SUBLANG_ENGLISH_PHILIPPINES', 13),
    ('SUBLANG_FRENCH', 1),
    ('SUBLANG_FRENCH_BELGIAN', 2),
    ('SUBLANG_FRENCH_CANADIAN', 3),
    ('SUBLANG_FRENCH_SWISS', 4),
    ('SUBLANG_FRENCH_LUXEMBOURG', 5),
    ('SUBLANG_FRENCH_MONACO', 6),
    ('SUBLANG_GERMAN', 1),
    ('SUBLANG_GERMAN_SWISS', 2),
    ('SUBLANG_GERMAN_AUSTRIAN', 3),
    ('SUBLANG_GERMAN_LUXEMBOURG', 4),
    ('SUBLANG_GERMAN_LIECHTENSTEIN', 5),
    ('SUBLANG_ITALIAN', 1),
    ('SUBLANG_ITALIAN_SWISS', 2),
    ('SUBLANG_KASHMIRI_SASIA', 2),
    ('SUBLANG_KASHMIRI_INDIA', 2),
    ('SUBLANG_KOREAN', 1),
    ('SUBLANG_LITHUANIAN', 1),
    ('SUBLANG_MALAY_MALAYSIA', 1),
    ('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 2),
    ('SUBLANG_NEPALI_INDIA', 2),
    ('SUBLANG_NORWEGIAN_BOKMAL', 1),
    ('SUBLANG_NORWEGIAN_NYNORSK', 2),
    ('SUBLANG_PORTUGUESE', 2),
    ('SUBLANG_PORTUGUESE_BRAZILIAN', 1),
    ('SUBLANG_SERBIAN_LATIN', 2),
    ('SUBLANG_SERBIAN_CYRILLIC', 3),
    ('SUBLANG_SPANISH', 1),
    ('SUBLANG_SPANISH_MEXICAN', 2),
    ('SUBLANG_SPANISH_MODERN', 3),
    ('SUBLANG_SPANISH_GUATEMALA', 4),
    ('SUBLANG_SPANISH_COSTA_RICA', 5),
    ('SUBLANG_SPANISH_PANAMA', 6),
    ('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 7),
    ('SUBLANG_SPANISH_VENEZUELA', 8),
    ('SUBLANG_SPANISH_COLOMBIA', 9),
    ('SUBLANG_SPANISH_PERU', 10),
    ('SUBLANG_SPANISH_ARGENTINA', 11),
    ('SUBLANG_SPANISH_ECUADOR', 12),
    ('SUBLANG_SPANISH_CHILE', 13),
    ('SUBLANG_SPANISH_URUGUAY', 14),
    ('SUBLANG_SPANISH_PARAGUAY', 15),
    ('SUBLANG_SPANISH_BOLIVIA', 16),
    ('SUBLANG_SPANISH_EL_SALVADOR', 17),
    ('SUBLANG_SPANISH_HONDURAS', 18),
    ('SUBLANG_SPANISH_NICARAGUA', 19),
    ('SUBLANG_SPANISH_PUERTO_RICO', 20),
    ('SUBLANG_SWEDISH', 1),
    ('SUBLANG_SWEDISH_FINLAND', 2),
    ('SUBLANG_URDU_PAKISTAN', 1),
    ('SUBLANG_URDU_INDIA', 2),
    ('SUBLANG_UZBEK_LATIN', 1),
    ('SUBLANG_UZBEK_CYRILLIC', 2),
    ('SUBLANG_DUTCH_SURINAM', 3),
    ('SUBLANG_ROMANIAN', 1),
    ('SUBLANG_ROMANIAN_MOLDAVIA', 2),
    ('SUBLANG_RUSSIAN', 1),
    ('SUBLANG_RUSSIAN_MOLDAVIA', 2),
    ('SUBLANG_CROATIAN', 1),
    ('SUBLANG_LITHUANIAN_CLASSIC', 2),
    ('SUBLANG_GAELIC', 1),
    ('SUBLANG_GAELIC_SCOTTISH', 2),
    ('SUBLANG_GAELIC_MANX', 3)]
# Build the SUBLANG lookup: name -> value, plus value -> [names], since
# many sublanguage names share one numeric value.
# (The original also built a plain two-way dict on the line before, but it
# was immediately overwritten by dict(sublang) — a dead store — so it has
# been removed.)
SUBLANG = dict(sublang)
for sublang_name, sublang_value in sublang:
    if sublang_value in SUBLANG:
        # Value already seen: append this name to its list.
        SUBLANG[sublang_value].append(sublang_name)
    else:
        SUBLANG[sublang_value] = [sublang_name]
def get_sublang_name_for_lang(lang_value, sublang_value):
    """Return the sublanguage name that best fits the given language.

    Sublanguage IDs are reused across primary languages, so prefer the
    candidate whose name contains the primary language's name; otherwise
    fall back to the first registered name, or '*unknown*' when the
    sublanguage ID is not known at all.
    """
    lang_name = LANG.get(lang_value, '*unknown*')
    candidates = SUBLANG.get(sublang_value, [])
    for candidate in candidates:
        if lang_name in candidate:
            return candidate
    return SUBLANG.get(sublang_value, ['*unknown*'])[0]
def parse_strings(data, counter, l):
    """Parse a block of length-prefixed UTF-16LE strings.

    *data* is raw string-table data: a run of entries, each a 16-bit
    character count followed by that many UTF-16LE code units. Decoded
    strings are stored into the mapping *l* keyed by the running
    *counter*. Parsing gives up after 3 decode errors.

    NOTE(review): the nesting below was reconstructed from a
    decompiled, indentation-stripped dump — confirm against upstream.
    """
    i = 0
    error_count = 0
    while i < len(data):
        data_slice = data[i:i + 2]
        if len(data_slice) < 2:
            # Truncated length prefix: nothing more to parse.
            break
        len_ = struct.unpack('<h', data_slice)[0]
        i += 2
        if len_ != 0:
            # Sanity-check the declared byte length against the buffer.
            if 0 <= len_ * 2 <= len(data):
                try:
                    l[counter] = b(data[i:i + len_ * 2]).decode('utf-16le')
                except UnicodeDecodeError:
                    error_count += 1
                if error_count >= 3:
                    # Too many bad entries: assume the table is garbage.
                    break
                i += len_ * 2
        counter += 1
def retrieve_flags(flag_dict, flag_filter):
    """Read the flags from a dictionary and return them in a usable form.

    Will return a list of (flag, value) for all flags in "flag_dict"
    matching the filter "flag_filter". Only string-typed keys are
    considered, since the flag dicts also carry reversed value->name
    entries keyed by int.
    """
    return [
        (name, value)
        for name, value in flag_dict.items()
        if isinstance(name, (str, bytes)) and name.startswith(flag_filter)
    ]
def set_flags(obj, flag_field, flags):
    """Will process the flags and set attributes in the object accordingly.

    The object "obj" will gain attributes named after the flags provided in
    "flags" and valued True/False, matching the results of applying each
    flag value from "flags" to flag_field.
    """
    for name, bitmask in flags:
        obj.__dict__[name] = bool(bitmask & flag_field)
def power_of_two(val):
    """Return True when *val* is a non-zero power of two."""
    # A power of two has exactly one bit set, so clearing the lowest
    # set bit (val & (val - 1)) must yield zero.
    return val != 0 and not val & (val - 1)
if not PY3:
    # Python 2: register a decode-capable 'backslashreplace_' error
    # handler that escapes undecodable input as \xNN / \uNNNN sequences.
    def handler(err):
        start = err.start
        end = err.end
        values = [('\\u{0:04x}' if ord(err.object[i]) > 255 else '\\x{0:02x}', ord(err.object[i])) for i in range(start, end)]
        return (
         ('').join([elm[0].format(elm[1]) for elm in values]),
         end)

    import codecs
    codecs.register_error('backslashreplace_', handler)

    def b(x):
        # py2 str is already a byte string; pass through unchanged.
        return x
else:
    import codecs
    # Python 3: alias the builtin 'backslashreplace' handler under the
    # same custom name used by the py2 branch.
    codecs.register_error('backslashreplace_', codecs.lookup_error('backslashreplace'))

    def b(x):
        # Normalize input to bytes; text input is encoded as cp1252.
        if isinstance(x, (bytes, bytearray)):
            return bytes(x)
        else:
            return codecs.encode(x, 'cp1252')
# Hard-coded file alignment value (512 bytes).
FILE_ALIGNMENT_HARDCODED_VALUE = 512
# Module-level latches, presumably so each alignment warning is emitted
# only once per process — verify at their use sites.
FileAlignment_Warning = False
SectionAlignment_Warning = False
class UnicodeStringWrapperPostProcessor(object):
    """This class attempts to help the process of identifying strings
    that might be plain Unicode or Pascal. A list of strings will be
    wrapped on it with the hope the overlappings will help make the
    decision about their type."""

    def __init__(self, pe, rva_ptr):
        self.pe = pe
        self.rva_ptr = rva_ptr
        self.string = None

    def get_rva(self):
        """Get the RVA of the string."""
        return self.rva_ptr

    def __str__(self):
        """Return the escaped UTF-8 representation of the string."""
        return self.decode('utf-8', 'backslashreplace_')

    def decode(self, *args):
        """Decode the wrapped bytes; empty string if nothing rendered yet."""
        if not self.string:
            return ''
        return self.string.decode(*args)

    def invalidate(self):
        """Make this instance None, to express it's no known string type."""
        # NOTE(review): rebinding the local name has no effect on the
        # object itself; callers must drop their own references.
        self = None

    def render_pascal_16(self):
        """Interpret the data as a Pascal string: 16-bit length prefix
        followed by that many UTF-16 characters."""
        self.string = self.pe.get_string_u_at_rva(
            self.rva_ptr + 2, max_length=self.get_pascal_16_length())

    def get_pascal_16_length(self):
        """Read the would-be Pascal length prefix at this string's RVA."""
        return self.__get_word_value_at_rva(self.rva_ptr)

    def __get_word_value_at_rva(self, rva):
        """Read a little-endian WORD at *rva*; False on any failure.

        BUG FIX: the decompiled code read at self.rva_ptr, ignoring the
        *rva* argument, which broke ask_unicode_16()'s probe of the word
        preceding the next string.
        """
        try:
            data = self.pe.get_data(rva, 2)
        except PEFormatError:
            return False
        if len(data) < 2:
            return False
        return struct.unpack('<H', data)[0]

    def ask_unicode_16(self, next_rva_ptr):
        """The next RVA is taken to be the one immediately following this one.
        Such RVA could indicate the natural end of the string and will be checked
        to see if there's a Unicode NULL character there.
        """
        if self.__get_word_value_at_rva(next_rva_ptr - 2) == 0:
            self.length = next_rva_ptr - self.rva_ptr
            return True
        return False

    def render_unicode_16(self):
        """Interpret the data as a NUL-terminated UTF-16 string."""
        self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
    """Raised when the data being parsed violates the PE format."""

    def __init__(self, value):
        # Keep the offending detail around for later inspection.
        self.value = value

    def __str__(self):
        # Show the repr so byte strings and odd values stay readable.
        return repr(self.value)
class Dump(object):
    """Accumulates text fragments for pretty-printing PE information."""

    def __init__(self):
        self.text = []

    def add_lines(self, txt, indent=0):
        """Add several lines at once, each optionally indented."""
        for entry in txt:
            self.add_line(entry, indent)

    def add_line(self, txt, indent=0):
        """Add a single line (newline appended), optionally indented."""
        self.add(txt + '\n', indent)

    def add(self, txt, indent=0):
        """Add raw text with no trailing newline, optionally indented."""
        self.text.append('{0}{1}'.format(' ' * indent, txt))

    def add_header(self, txt):
        """Add a header line framed by ten dashes on each side."""
        self.add_line('{0}{1}{0}\n'.format('-' * 10, txt))

    def add_newline(self):
        """Add a blank line."""
        self.text.append('\n')

    def get_text(self):
        """Join and return everything accumulated so far."""
        return ''.join('{0}'.format(piece) for piece in self.text)
# Size in bytes of each struct-module format character; consumed by
# Structure.sizeof_type() when computing field offsets.
STRUCT_SIZEOF_TYPES = {'x':1,
 'c':1, 'b':1, 'B':1, 'h':2,
 'H':2, 'i':4,
 'I':4, 'l':4, 'L':4, 'f':4, 'q':8,
 'Q':8, 'd':8, 's':1}
class Structure(object):
    """Prepare structure object to extract members from data.

    Format is a list containing definitions for the elements
    of the structure: ('STRUCT_NAME', ('type,FieldName', ...)).
    """

    def __init__(self, format, name=None, file_offset=None):
        # struct-module format string; '<' forces little-endian.
        self.__format__ = '<'
        # One entry per unpacked element: a list of attribute names
        # (unions map several names onto the same element).
        self.__keys__ = []
        self.__format_length__ = 0
        self.__field_offsets__ = dict()
        self.__unpacked_data_elms__ = []
        self.__set_format__(format[1])
        self.__all_zeroes__ = False
        self.__file_offset__ = file_offset
        if name:
            self.name = name
        else:
            self.name = format[0]

    def __get_format__(self):
        return self.__format__

    def get_field_absolute_offset(self, field_name):
        """Return the file offset of the requested field in the structure."""
        return self.__file_offset__ + self.__field_offsets__[field_name]

    def get_field_relative_offset(self, field_name):
        """Return the offset within the structure for the requested field."""
        return self.__field_offsets__[field_name]

    def get_file_offset(self):
        return self.__file_offset__

    def set_file_offset(self, offset):
        self.__file_offset__ = offset

    def all_zeroes(self):
        """Returns true if the unpacked data is all zeros."""
        return self.__all_zeroes__

    def sizeof_type(self, t):
        """Size in bytes of one struct format token, honoring a leading
        repeat count (e.g. '8s' -> 8 bytes)."""
        count = 1
        _t = t
        if t[0] in string.digits:
            count = int(('').join([d for d in t if d in string.digits]))
            _t = ('').join([d for d in t if d not in string.digits])
        return STRUCT_SIZEOF_TYPES[_t] * count

    def __set_format__(self, format):
        """Build the struct format string, key lists and field offsets
        from the ('type,Name1,Name2', ...) element definitions."""
        offset = 0
        for elm in format:
            if ',' in elm:
                elm_type, elm_name = elm.split(',', 1)
                self.__format__ += elm_type
                self.__unpacked_data_elms__.append(None)
                elm_names = elm_name.split(',')
                names = []
                for elm_name in elm_names:
                    # NOTE(review): membership test against a list of
                    # name-lists -- kept as-is from the original; the
                    # dedup branch likely never fires.  Confirm upstream.
                    if elm_name in self.__keys__:
                        search_list = [x[:len(elm_name)] for x in self.__keys__]
                        occ_count = search_list.count(elm_name)
                        elm_name = ('{0}_{1:d}').format(elm_name, occ_count)
                    names.append(elm_name)
                    self.__field_offsets__[elm_name] = offset
                offset += self.sizeof_type(elm_type)
                self.__keys__.append(names)
        self.__format_length__ = struct.calcsize(self.__format__)

    def sizeof(self):
        """Return size of the structure."""
        return self.__format_length__

    def __unpack__(self, data):
        """Unpack *data* into attributes; raises PEFormatError when the
        buffer is shorter than the structure."""
        data = b(data)
        if len(data) > self.__format_length__:
            data = data[:self.__format_length__]
        elif len(data) < self.__format_length__:
            raise PEFormatError('Data length less than expected header length.')
        if count_zeroes(data) == len(data):
            self.__all_zeroes__ = True
        self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
        for i in range(len(self.__unpacked_data_elms__)):
            # Every union alias gets the same unpacked value.
            for key in self.__keys__[i]:
                setattr(self, key, self.__unpacked_data_elms__[i])

    def __pack__(self):
        """Re-pack current attribute values into bytes.  For unions the
        first alias whose value changed wins."""
        new_values = []
        for i in range(len(self.__unpacked_data_elms__)):
            for key in self.__keys__[i]:
                new_val = getattr(self, key)
                old_val = self.__unpacked_data_elms__[i]
                if new_val != old_val:
                    break
            new_values.append(new_val)
        return struct.pack(self.__format__, *new_values)

    def __str__(self):
        return ('\n').join(self.dump())

    def __repr__(self):
        return '<Structure: %s>' % (' ').join([(' ').join(s.split()) for s in self.dump()])

    def dump(self, indentation=0):
        """Returns a string representation of the structure."""
        dump = []
        dump.append(('[{0}]').format(self.name))
        printable_bytes = [ord(i) for i in string.printable if i not in string.whitespace]
        for keys in self.__keys__:
            for key in keys:
                val = getattr(self, key)
                # 'long' is assumed to be aliased to int on Py3 earlier
                # in this module.
                if isinstance(val, (int, long)):
                    val_str = '0x%-8X' % val
                    if key == 'TimeDateStamp' or key == 'dwTimeStamp':
                        try:
                            val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
                        except ValueError:
                            val_str += ' [INVALID TIME]'
                else:
                    # Byte-string field: escape non-printable characters.
                    val_str = bytearray(val)
                    val_str = ('').join([chr(i) if i in printable_bytes else ('\\x{0:02x}').format(i) for i in val_str.rstrip(b'\x00')])
                dump.append('0x%-8X 0x%-3X %-30s %s' % (
                    self.__field_offsets__[key] + self.__file_offset__,
                    self.__field_offsets__[key], key + ':', val_str))
        return dump

    def dump_dict(self):
        """Returns a dictionary representation of the structure."""
        dump_dict = dict()
        dump_dict['Structure'] = self.name
        for keys in self.__keys__:
            for key in keys:
                val = getattr(self, key)
                if isinstance(val, (int, long)):
                    if key == 'TimeDateStamp' or key == 'dwTimeStamp':
                        try:
                            val = '0x%-8X [%s UTC]' % (val, time.asctime(time.gmtime(val)))
                        except ValueError:
                            val = '0x%-8X [INVALID TIME]' % val
                else:
                    # BUG FIX: the decompiled inner comprehension was
                    # syntactically invalid; normalize each element to an
                    # int code point before escaping non-printables.
                    val = ('').join(
                        (chr(d) if chr(d) in string.printable else '\\x%02x' % d)
                        for d in [c if isinstance(c, int) else ord(c) for c in val])
                dump_dict[key] = {'FileOffset': self.__field_offsets__[key] + self.__file_offset__,
                                  'Offset': self.__field_offsets__[key],
                                  'Value': val}
        return dump_dict
class SectionStructure(Structure):
    """Convenience section handling class."""

    def __init__(self, *argl, **argd):
        # Pop the owning PE instance before delegating to Structure.
        if 'pe' in argd:
            self.pe = argd['pe']
            del argd['pe']
        Structure.__init__(self, *argl, **argd)

    def get_data(self, start=None, length=None):
        """Get data chunk from a section.
        Allows to query data from the section by passing the
        addresses where the PE file would be loaded by default.
        It is then possible to retrieve code and data by its real
        addresses as it would be if loaded.
        Returns bytes() under Python 3.x and set() under 2.7
        """
        PointerToRawData_adj = self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment)
        VirtualAddress_adj = self.pe.adjust_SectionAlignment(self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment)
        if start is None:
            offset = PointerToRawData_adj
        else:
            # Translate the requested RVA into a raw file offset.
            offset = start - VirtualAddress_adj + PointerToRawData_adj
        if length is not None:
            end = offset + length
        else:
            end = offset + self.SizeOfRawData
        # Clamp reads to the section's raw-data extent.
        if end > self.PointerToRawData + self.SizeOfRawData:
            end = self.PointerToRawData + self.SizeOfRawData
        return self.pe.__data__[offset:end]

    def __setattr__(self, name, val):
        # Keep 'Characteristics' and the derived IMAGE_SCN_* boolean
        # attributes mutually consistent.
        if name == 'Characteristics':
            section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
            # Expose every IMAGE_SCN_* flag as a boolean attribute.
            set_flags(self, val, section_flags)
        else:
            if 'IMAGE_SCN_' in name:
                if hasattr(self, name):
                    if val:
                        self.__dict__['Characteristics'] |= SECTION_CHARACTERISTICS[name]
                    else:
                        # XOR-clear mirrors the original/upstream code;
                        # toggles rather than clears if the bit was unset.
                        self.__dict__['Characteristics'] ^= SECTION_CHARACTERISTICS[name]
        self.__dict__[name] = val

    def get_rva_from_offset(self, offset):
        # Map a raw file offset into this section's RVA space.
        return offset - self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment) + self.pe.adjust_SectionAlignment(self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment)

    def get_offset_from_rva(self, rva):
        # Inverse of get_rva_from_offset().
        return rva - self.pe.adjust_SectionAlignment(self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment) + self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment)

    def contains_offset(self, offset):
        """Check whether the section contains the file offset provided."""
        if self.PointerToRawData is None:
            return False
        else:
            return self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment) <= offset < self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment) + self.SizeOfRawData

    def contains_rva(self, rva):
        """Check whether the section contains the address provided."""
        # If the raw data is truncated on disk, fall back to the
        # virtual size for the containment check.
        if len(self.pe.__data__) - self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment) < self.SizeOfRawData:
            size = self.Misc_VirtualSize
        else:
            size = max(self.SizeOfRawData, self.Misc_VirtualSize)
        VirtualAddress_adj = self.pe.adjust_SectionAlignment(self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment)
        # Never let this section claim addresses belonging to the next one.
        if self.next_section_virtual_address is not None:
            if self.next_section_virtual_address > self.VirtualAddress:
                if VirtualAddress_adj + size > self.next_section_virtual_address:
                    size = self.next_section_virtual_address - VirtualAddress_adj
        return VirtualAddress_adj <= rva < VirtualAddress_adj + size

    def contains(self, rva):
        # Alias for contains_rva().
        return self.contains_rva(rva)

    def get_entropy(self):
        """Calculate and return the entropy for the section."""
        return self.entropy_H(self.get_data())

    def get_hash_sha1(self):
        """Get the SHA-1 hex-digest of the section's data."""
        # sha1 may be None if hashlib support is missing; then returns None.
        if sha1 is not None:
            return sha1(self.get_data()).hexdigest()

    def get_hash_sha256(self):
        """Get the SHA-256 hex-digest of the section's data."""
        if sha256 is not None:
            return sha256(self.get_data()).hexdigest()

    def get_hash_sha512(self):
        """Get the SHA-512 hex-digest of the section's data."""
        if sha512 is not None:
            return sha512(self.get_data()).hexdigest()

    def get_hash_md5(self):
        """Get the MD5 hex-digest of the section's data."""
        if md5 is not None:
            return md5(self.get_data()).hexdigest()

    def entropy_H(self, data):
        """Calculate the Shannon entropy (bits per byte) of *data*."""
        if len(data) == 0:
            return 0.0
        else:
            occurences = Counter(bytearray(data))
            entropy = 0
            for x in occurences.values():
                p_x = float(x) / len(data)
                entropy -= p_x * math.log(p_x, 2)
            return entropy
class DataContainer(object):
    """Generic data container: promotes keyword arguments to attributes."""

    def __init__(self, **args):
        # Go through object.__setattr__ directly so subclasses that
        # override __setattr__ don't intercept initial population.
        raw_set = super(DataContainer, self).__setattr__
        for key, value in args.items():
            raw_set(key, value)
class ImportDescData(DataContainer):
    """Holds import descriptor information.
    dll: name of the imported DLL
    imports: list of imported symbols (ImportData instances)
    struct: IMAGE_IMPORT_DESCRIPTOR structure
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class ImportData(DataContainer):
    """Holds imported symbol's information.
    ordinal: Ordinal of the symbol
    name: Name of the symbol
    bound: If the symbol is bound, this contains
    the address.
    """

    def __setattr__(self, name, val):
        # Once the instance is fully populated, writes to the key
        # attributes are mirrored into the underlying thunk structures.
        if hasattr(self, 'ordinal') and hasattr(self, 'bound') and hasattr(self, 'name'):
            if name == 'ordinal':
                if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
                    ordinal_flag = IMAGE_ORDINAL_FLAG
                elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                    ordinal_flag = IMAGE_ORDINAL_FLAG64
                # Mark the entry as importing by ordinal; all union
                # aliases of the thunk get the same value.
                self.struct_table.Ordinal = ordinal_flag | (val & 65535)
                self.struct_table.AddressOfData = self.struct_table.Ordinal
                self.struct_table.Function = self.struct_table.Ordinal
                self.struct_table.ForwarderString = self.struct_table.Ordinal
            elif name == 'bound':
                if self.struct_iat is not None:
                    self.struct_iat.AddressOfData = val
                    self.struct_iat.AddressOfData = self.struct_iat.AddressOfData
                    self.struct_iat.Function = self.struct_iat.AddressOfData
                    self.struct_iat.ForwarderString = self.struct_iat.AddressOfData
            elif name == 'address':
                self.struct_table.AddressOfData = val
                self.struct_table.Ordinal = self.struct_table.AddressOfData
                self.struct_table.Function = self.struct_table.AddressOfData
                self.struct_table.ForwarderString = self.struct_table.AddressOfData
            elif name == 'name':
                if self.name_offset:
                    # Reset the entry in case it was importing by ordinal.
                    name_rva = self.pe.get_rva_from_offset(self.name_offset)
                    self.pe.set_dword_at_offset(self.ordinal_offset, 0 | name_rva)
                    # Longer replacement names would overflow their slot;
                    # tolerated silently (original behavior).
                    if len(val) > len(self.name):
                        pass
                    self.pe.set_bytes_at_offset(self.name_offset, val)
        self.__dict__[name] = val
class ExportDirData(DataContainer):
    """Holds export directory information.
    struct: IMAGE_EXPORT_DIRECTORY structure
    symbols: list of exported symbols (ExportData instances)
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class ExportData(DataContainer):
    """Holds exported symbols' information.
    ordinal: ordinal of the symbol
    address: address of the symbol
    name: name of the symbol (None if the symbol is
    exported by ordinal only)
    forwarder: if the symbol is forwarded it will
    contain the name of the target symbol,
    None otherwise.
    """

    def __setattr__(self, name, val):
        # Once fully initialized, mirror writes into the file image.
        initialized = (hasattr(self, 'ordinal') and hasattr(self, 'address')
                       and hasattr(self, 'forwarder') and hasattr(self, 'name'))
        if initialized:
            if name == 'ordinal':
                self.pe.set_word_at_offset(self.ordinal_offset, val)
            elif name == 'address':
                self.pe.set_dword_at_offset(self.address_offset, val)
            elif name == 'name':
                # Longer replacement names would overflow their slot;
                # tolerated silently (original behavior).
                if len(val) > len(self.name):
                    pass
                self.pe.set_bytes_at_offset(self.name_offset, val)
            elif name == 'forwarder':
                if len(val) > len(self.forwarder):
                    pass
                self.pe.set_bytes_at_offset(self.forwarder_offset, val)
        self.__dict__[name] = val
class ResourceDirData(DataContainer):
    """Holds resource directory information.
    struct: IMAGE_RESOURCE_DIRECTORY structure
    entries: list of entries (ResourceDirEntryData instances)
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class ResourceDirEntryData(DataContainer):
    """Holds resource directory entry data.
    struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
    name: If the resource is identified by name this
    attribute will contain the name string. None
    otherwise. If identified by id, the id is
    available at 'struct.Id'
    id: the id, also in struct.Id
    directory: If this entry has a lower level directory
    this attribute will point to the
    ResourceDirData instance representing it.
    data: If this entry has no further lower directories
    and points to the actual resource data, this
    attribute will reference the corresponding
    ResourceDataEntryData instance.
    (Either of the 'directory' or 'data' attribute will exist,
    but not both.)
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class ResourceDataEntryData(DataContainer):
    """Holds resource data entry information.
    struct: IMAGE_RESOURCE_DATA_ENTRY structure
    lang: Primary language ID
    sublang: Sublanguage ID
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class DebugData(DataContainer):
    """Holds debug information.
    struct: IMAGE_DEBUG_DIRECTORY structure
    entries: list of entries (IMAGE_DEBUG_TYPE instances)
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class BaseRelocationData(DataContainer):
    """Holds base relocation information.
    struct: IMAGE_BASE_RELOCATION structure
    entries: list of relocation data (RelocationData instances)
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class RelocationData(DataContainer):
    """Holds relocation information.
    type: Type of relocation
    The type string is can be obtained by
    RELOCATION_TYPE[type]
    rva: RVA of the relocation
    """

    def __setattr__(self, name, val):
        # Once 'struct' exists, keep the packed Data word in sync:
        # high 4 bits hold the type, low 12 bits the offset from the
        # relocation block's base RVA.
        if hasattr(self, 'struct'):
            word = self.struct.Data
            if name == 'type':
                word = (val << 12) | (word & 4095)
            elif name == 'rva':
                offset = val - self.base_rva
                if offset < 0:
                    # Clamp RVAs below the block base to offset zero.
                    offset = 0
                word = (word & 61440) | (offset & 4095)
            self.struct.Data = word
        self.__dict__[name] = val
class TlsData(DataContainer):
    """Holds TLS information.
    struct: IMAGE_TLS_DIRECTORY structure
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class BoundImportDescData(DataContainer):
    """Holds bound import descriptor data.
    This directory entry will provide with information on the
    DLLs this PE files has been bound to (if bound at all).
    The structure will contain the name and timestamp of the
    DLL at the time of binding so that the loader can know
    whether it differs from the one currently present in the
    system and must, therefore, re-bind the PE's imports.
    struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure
    name: DLL name
    entries: list of entries (BoundImportRefData instances)
    the entries will exist if this DLL has forwarded
    symbols. If so, the destination DLL will have an
    entry in this list.
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class LoadConfigData(DataContainer):
    """Holds Load Config data.
    struct: IMAGE_LOAD_CONFIG_DIRECTORY structure
    name: dll name
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
class BoundImportRefData(DataContainer):
    """Holds bound import forwarder reference data.
    Contains the same information as the bound descriptor but
    for forwarded DLLs, if any.
    struct: IMAGE_BOUND_FORWARDER_REF structure
    name: dll name
    """
    # Pure attribute bag; all fields are injected by DataContainer.__init__.
    pass
# Byte string of characters legal in a DOS-style file name; built as
# bytes so membership tests work against byte-string input.
if PY3:
    allowed_filename = b(string.ascii_lowercase + string.ascii_uppercase + string.digits + "!#$%&'()-@^_`{}~+,.;=[]")
else:
    # Py2 names for the same constants (string.lowercase/uppercase).
    allowed_filename = b(string.lowercase + string.uppercase + string.digits + b"!#$%&'()-@^_`{}~+,.;=[]")
def is_valid_dos_filename(s):
    """Return True if *s* contains only characters allowed in DOS file
    names (path separators included); False for None or non-string input.
    """
    if s is None or not isinstance(s, (str, bytes, bytearray)):
        return False
    # Path separators are acceptable on top of the base character set.
    allowed = allowed_filename + b'\\/'
    return all(c in allowed for c in set(s))
# Byte string of characters considered legal in exported symbol names.
if PY3:
    allowed_function_name = b(string.ascii_lowercase + string.ascii_uppercase + string.digits + '_?@$()<>')
else:
    # Py2 names for the same constants (string.lowercase/uppercase).
    allowed_function_name = b(string.lowercase + string.uppercase + string.digits + b'_?@$()<>')
def is_valid_function_name(s):
    """Return True if *s* looks like a well-formed exported symbol name;
    False for None or non-string input."""
    if s is None or not isinstance(s, (str, bytes, bytearray)):
        return False
    return all(c in allowed_function_name for c in set(s))
class PE(object):
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
to process and an optional argument 'fast_load' (False by default)
which controls whether to load all the directories information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
would load 'module.dll' and process it. If the data would be already
available in a buffer the same could be achieved with:
pe = pefile.PE(data=module_dll_data)
The "fast_load" can be set to a default by setting its value in the
module itself by means, for instance, of a "pefile.fast_load = True".
That will make all the subsequent instances not to load the
whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER', ('H,e_magic', 'H,e_cblp', 'H,e_cp', 'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc', 'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum', 'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res', 'H,e_oemid', 'H,e_oeminfo', '20s,e_res2', 'I,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER', ('H,Machine', 'H,NumberOfSections', 'I,TimeDateStamp', 'I,PointerToSymbolTable', 'I,NumberOfSymbols', 'H,SizeOfOptionalHeader', 'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY', ('I,VirtualAddress', 'I,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER', ('H,Magic', 'B,MajorLinkerVersion', 'B,MinorLinkerVersion', 'I,SizeOfCode', 'I,SizeOfInitializedData', 'I,SizeOfUninitializedData', 'I,AddressOfEntryPoint', 'I,BaseOfCode', 'I,BaseOfData', 'I,ImageBase', 'I,SectionAlignment', 'I,FileAlignment', 'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion', 'H,MajorImageVersion', 'H,MinorImageVersion', 'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion', 'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders', 'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics', 'I,SizeOfStackReserve', 'I,SizeOfStackCommit', 'I,SizeOfHeapReserve', 'I,SizeOfHeapCommit', 'I,LoaderFlags', 'I,NumberOfRvaAndSizes'))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64', ('H,Magic', 'B,MajorLinkerVersion', 'B,MinorLinkerVersion', 'I,SizeOfCode', 'I,SizeOfInitializedData', 'I,SizeOfUninitializedData', 'I,AddressOfEntryPoint', 'I,BaseOfCode', 'Q,ImageBase', 'I,SectionAlignment', 'I,FileAlignment', 'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion', 'H,MajorImageVersion', 'H,MinorImageVersion', 'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion', 'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders', 'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics', 'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit', 'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit', 'I,LoaderFlags', 'I,NumberOfRvaAndSizes'))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('I,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER', ('8s,Name', 'I,Misc,Misc_PhysicalAddress,Misc_VirtualSize', 'I,VirtualAddress', 'I,SizeOfRawData', 'I,PointerToRawData', 'I,PointerToRelocations', 'I,PointerToLinenumbers', 'H,NumberOfRelocations', 'H,NumberOfLinenumbers', 'I,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR', ('I,grAttrs', 'I,szName', 'I,phmod', 'I,pIAT', 'I,pINT', 'I,pBoundIAT', 'I,pUnloadIAT', 'I,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR', ('I,OriginalFirstThunk,Characteristics', 'I,TimeDateStamp', 'I,ForwarderChain', 'I,Name', 'I,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY', ('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,Name', 'I,Base', 'I,NumberOfFunctions', 'I,NumberOfNames', 'I,AddressOfFunctions', 'I,AddressOfNames', 'I,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY', ('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('I,Name', 'I,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY', ('I,OffsetToData', 'I,Size', 'I,CodePage', 'I,Reserved'))
__VS_VERSIONINFO_format__ = ('VS_VERSIONINFO', ('H,Length', 'H,ValueLength', 'H,Type'))
__VS_FIXEDFILEINFO_format__ = ('VS_FIXEDFILEINFO', ('I,Signature', 'I,StrucVersion', 'I,FileVersionMS', 'I,FileVersionLS', 'I,ProductVersionMS', 'I,ProductVersionLS', 'I,FileFlagsMask', 'I,FileFlags', 'I,FileOS', 'I,FileType', 'I,FileSubtype', 'I,FileDateMS', 'I,FileDateLS'))
__StringFileInfo_format__ = ('StringFileInfo', ('H,Length', 'H,ValueLength', 'H,Type'))
__StringTable_format__ = ('StringTable', ('H,Length', 'H,ValueLength', 'H,Type'))
__String_format__ = ('String', ('H,Length', 'H,ValueLength', 'H,Type'))
__Var_format__ = ('Var', ('H,Length', 'H,ValueLength', 'H,Type'))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA', ('I,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA', ('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY', ('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,Type', 'I,SizeOfData', 'I,AddressOfRawData', 'I,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION', ('I,VirtualAddress', 'I,SizeOfBlock'))
__IMAGE_BASE_RELOCATION_ENTRY_format__ = ('IMAGE_BASE_RELOCATION_ENTRY', ('H,Data',))
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY', ('I,StartAddressOfRawData', 'I,EndAddressOfRawData', 'I,AddressOfIndex', 'I,AddressOfCallBacks', 'I,SizeOfZeroFill', 'I,Characteristics'))
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY', ('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData', 'Q,AddressOfIndex', 'Q,AddressOfCallBacks', 'I,SizeOfZeroFill', 'I,Characteristics'))
__IMAGE_LOAD_CONFIG_DIRECTORY_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY', ('I,Size', 'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,GlobalFlagsClear', 'I,GlobalFlagsSet', 'I,CriticalSectionDefaultTimeout', 'I,DeCommitFreeBlockThreshold', 'I,DeCommitTotalFreeThreshold', 'I,LockPrefixTable', 'I,MaximumAllocationSize', 'I,VirtualMemoryThreshold', 'I,ProcessHeapFlags', 'I,ProcessAffinityMask', 'H,CSDVersion', 'H,Reserved1', 'I,EditList', 'I,SecurityCookie', 'I,SEHandlerTable', 'I,SEHandlerCount', 'I,GuardCFCheckFunctionPointer', 'I,Reserved2', 'I,GuardCFFunctionTable', 'I,GuardCFFunctionCount', 'I,GuardFlags'))
__IMAGE_LOAD_CONFIG_DIRECTORY64_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY', ('I,Size', 'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,GlobalFlagsClear', 'I,GlobalFlagsSet', 'I,CriticalSectionDefaultTimeout', 'Q,DeCommitFreeBlockThreshold', 'Q,DeCommitTotalFreeThreshold', 'Q,LockPrefixTable', 'Q,MaximumAllocationSize', 'Q,VirtualMemoryThreshold', 'Q,ProcessAffinityMask', 'I,ProcessHeapFlags', 'H,CSDVersion', 'H,Reserved1', 'Q,EditList', 'Q,SecurityCookie', 'Q,SEHandlerTable', 'Q,SEHandlerCount', 'Q,GuardCFCheckFunctionPointer', 'Q,Reserved2', 'Q,GuardCFFunctionTable', 'Q,GuardCFFunctionCount', 'I,GuardFlags'))
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR', ('I,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF', ('I,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved'))
    def __init__(self, name=None, data=None, fast_load=None):
        """Load a PE from disk (*name*) or from a buffer (*data*).

        fast_load defaults to the module-level 'fast_load' setting when
        not given.  Raises ValueError if neither name nor data is supplied.
        """
        self.sections = []
        # '_PE__warnings' is the decompiler's spelling of the mangled
        # 'self.__warnings'; equivalent inside class PE.
        self._PE__warnings = []
        self.PE_TYPE = None
        if name is None:
            if data is None:
                raise ValueError('Must supply either name or data')
        self.__structures__ = []
        self._PE__from_file = None
        if not fast_load:
            # Fall back to the module-level default.
            fast_load = globals()['fast_load']
        try:
            self.__parse__(name, data, fast_load)
        except:
            # Release any mmap before propagating the failure.
            self.close()
            raise
    def close(self):
        """Release the underlying mmap when this instance owns one."""
        if self._PE__from_file is True:
            if hasattr(self, '__data__'):
                # Close only if __data__ is (or at least looks like) an
                # mmap object; buffers passed in by the caller are left alone.
                if isinstance(mmap.mmap, type) and isinstance(self.__data__, mmap.mmap) or 'mmap.mmap' in repr(type(self.__data__)):
                    self.__data__.close()
                    del self.__data__
    def __unpack_data__(self, format, data, file_offset):
        """Apply structure format to raw data.
        Returns and unpacked structure object if successful, None otherwise.
        """
        structure = Structure(format, file_offset=file_offset)
        try:
            structure.__unpack__(data)
        except PEFormatError as err:
            # Record the corruption as a warning and keep parsing;
            # the caller receives None for this header.
            self._PE__warnings.append(('Corrupt header "{0}" at file offset {1}. Exception: {2}').format(format[0], file_offset, err))
            return
        self.__structures__.append(structure)
        return structure
def __parse__(self, fname, data, fast_load):
    """Parse a Portable Executable file.
    Loads a PE file, parsing all its structures and making them available
    through the instance's attributes.
    """
    # NOTE(review): this file is decompiled output with indentation lost;
    # the block nesting below was reconstructed from statement order --
    # verify against the original bytecode.
    if fname is not None:
        # Refuse zero-byte files up front.
        stat = os.stat(fname)
        if stat.st_size == 0:
            raise PEFormatError('The file is empty')
        fd = None
        try:
            fd = open(fname, 'rb')
            self.fileno = fd.fileno()
            # Prefer a private (copy-on-write) mapping where available so
            # the on-disk file cannot be modified through this mapping.
            if hasattr(mmap, 'MAP_PRIVATE'):
                self.__data__ = mmap.mmap(self.fileno, 0, mmap.MAP_PRIVATE)
            else:
                self.__data__ = mmap.mmap(self.fileno, 0, access=mmap.ACCESS_READ)
            self._PE__from_file = True
        except IOError as excp:
            exception_msg = ('{0}').format(excp)
            if exception_msg:
                exception_msg = ': %s' % exception_msg
            raise Exception(("Unable to access file '{0}'{1}").format(fname, exception_msg))
        finally:
            # The mmap keeps its own handle; the fd can always be closed.
            if fd is not None:
                fd.close()
    else:
        if data is not None:
            self.__data__ = data
            self._PE__from_file = False
    if not fast_load:
        # Heuristic malformation check: warn when one byte value dominates
        # the file (>50% zeroes or >15% of any non-zero byte).
        for byte, byte_count in Counter(bytearray(self.__data__)).items():
            if byte == 0 and 1.0 * byte_count / len(self.__data__) > 0.5 or byte != 0 and 1.0 * byte_count / len(self.__data__) > 0.15:
                self._PE__warnings.append(("Byte 0x{0:02x} makes up {1:.4f}% of the file's contents. This may indicate truncation / malformation.").format(byte, 100.0 * byte_count / len(self.__data__)))
    dos_header_data = self.__data__[:64]
    if len(dos_header_data) != 64:
        raise PEFormatError('Unable to read the DOS Header, possibly a truncated file.')
    self.DOS_HEADER = self.__unpack_data__(self.__IMAGE_DOS_HEADER_format__,
        dos_header_data,
        file_offset=0)
    # NOTE(review): e_magic is dereferenced before the None check below, so
    # an unparseable DOS header raises AttributeError rather than
    # PEFormatError -- presumably intentional upstream quirk; confirm.
    if self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE:
        raise PEFormatError('Probably a ZM Executable (not a PE file).')
    if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
        raise PEFormatError('DOS Header magic not found.')
    if self.DOS_HEADER.e_lfanew > len(self.__data__):
        raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
    nt_headers_offset = self.DOS_HEADER.e_lfanew
    self.NT_HEADERS = self.__unpack_data__(self.__IMAGE_NT_HEADERS_format__,
        self.__data__[nt_headers_offset:nt_headers_offset + 8],
        file_offset=nt_headers_offset)
    if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
        raise PEFormatError('NT Headers not found.')
    # Reject other executable container formats whose two-byte signature
    # sits where 'PE\0\0' would be.
    if 65535 & self.NT_HEADERS.Signature == IMAGE_NE_SIGNATURE:
        raise PEFormatError('Invalid NT Headers signature. Probably a NE file')
    if 65535 & self.NT_HEADERS.Signature == IMAGE_LE_SIGNATURE:
        raise PEFormatError('Invalid NT Headers signature. Probably a LE file')
    if 65535 & self.NT_HEADERS.Signature == IMAGE_LX_SIGNATURE:
        raise PEFormatError('Invalid NT Headers signature. Probably a LX file')
    if 65535 & self.NT_HEADERS.Signature == IMAGE_TE_SIGNATURE:
        raise PEFormatError('Invalid NT Headers signature. Probably a TE file')
    if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
        raise PEFormatError('Invalid NT Headers signature.')
    self.FILE_HEADER = self.__unpack_data__(self.__IMAGE_FILE_HEADER_format__,
        self.__data__[nt_headers_offset + 4:nt_headers_offset + 4 + 32],
        file_offset=nt_headers_offset + 4)
    image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
    if not self.FILE_HEADER:
        raise PEFormatError('File Header missing')
    # Expose each Characteristics bit as a named boolean attribute.
    set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
    optional_header_offset = nt_headers_offset + 4 + self.FILE_HEADER.sizeof()
    sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
    # First attempt: parse as a PE32 (32-bit) optional header.
    self.OPTIONAL_HEADER = self.__unpack_data__(self.__IMAGE_OPTIONAL_HEADER_format__,
        self.__data__[optional_header_offset:optional_header_offset + 256],
        file_offset=optional_header_offset)
    # Smallest raw size for which a truncated PE32 optional header is still
    # accepted once zero-padded.
    MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
    if self.OPTIONAL_HEADER is None:
        if len(self.__data__[optional_header_offset:optional_header_offset + 512]) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE:
            # Retry with trailing zero padding to tolerate truncated headers.
            padding_length = 128
            padded_data = self.__data__[optional_header_offset:optional_header_offset + 512] + b'\x00' * padding_length
            self.OPTIONAL_HEADER = self.__unpack_data__(self.__IMAGE_OPTIONAL_HEADER_format__,
                padded_data,
                file_offset=optional_header_offset)
    if self.OPTIONAL_HEADER is not None:
        if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
            self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
        else:
            if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                # PE32+ (64-bit): re-unpack using the wider header layout.
                self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
                self.OPTIONAL_HEADER = self.__unpack_data__(self.__IMAGE_OPTIONAL_HEADER64_format__,
                    self.__data__[optional_header_offset:optional_header_offset + 512],
                    file_offset=optional_header_offset)
                # Minimum raw size accepted for a truncated PE32+ header.
                MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 73
                if self.OPTIONAL_HEADER is None:
                    if len(self.__data__[optional_header_offset:optional_header_offset + 512]) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE:
                        padding_length = 128
                        padded_data = self.__data__[optional_header_offset:optional_header_offset + 512] + b'\x00' * padding_length
                        self.OPTIONAL_HEADER = self.__unpack_data__(self.__IMAGE_OPTIONAL_HEADER64_format__,
                            padded_data,
                            file_offset=optional_header_offset)
    if not self.FILE_HEADER:
        raise PEFormatError('File Header missing')
    if self.OPTIONAL_HEADER is None:
        raise PEFormatError('No Optional Header found, invalid PE32 or PE32+ file.')
    if self.PE_TYPE is None:
        self._PE__warnings.append(('Invalid type 0x{0:04x} in Optional Header.').format(self.OPTIONAL_HEADER.Magic))
    dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
    set_flags(self.OPTIONAL_HEADER, self.OPTIONAL_HEADER.DllCharacteristics, dll_characteristics_flags)
    self.OPTIONAL_HEADER.DATA_DIRECTORY = []
    # Data directories start right after the optional header proper.
    offset = optional_header_offset + self.OPTIONAL_HEADER.sizeof()
    self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
    self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
    if self.OPTIONAL_HEADER.AddressOfEntryPoint < self.OPTIONAL_HEADER.SizeOfHeaders:
        self._PE__warnings.append('SizeOfHeaders is smaller than AddressOfEntryPoint: this file cannot run under Windows 8.')
    if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 16:
        self._PE__warnings.append('Suspicious NumberOfRvaAndSizes in the Optional Header. Normal values are never larger than 0x10, the value is: 0x%x' % self.OPTIONAL_HEADER.NumberOfRvaAndSizes)
    MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 256
    # Mask the sign bit so a bogus huge NumberOfRvaAndSizes still yields a
    # non-negative loop bound.
    for i in range(int(2147483647 & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
        if len(self.__data__) - offset == 0:
            break
        if len(self.__data__) - offset < 8:
            # Less than one full 8-byte IMAGE_DATA_DIRECTORY remains: pad.
            data = self.__data__[offset:] + b'\x00\x00\x00\x00\x00\x00\x00\x00'
        else:
            data = self.__data__[offset:offset + MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES]
        dir_entry = self.__unpack_data__(self.__IMAGE_DATA_DIRECTORY_format__,
            data,
            file_offset=offset)
        if dir_entry is None:
            break
        try:
            dir_entry.name = DIRECTORY_ENTRY[i]
        except (KeyError, AttributeError):
            break
        offset += dir_entry.sizeof()
        self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
        # Stop reading directory entries once well past the optional
        # header area.  (The stray 'pass' is a decompiler artifact.)
        if offset >= optional_header_offset + self.OPTIONAL_HEADER.sizeof() + 128:
            pass
            break
    offset = self.parse_sections(sections_offset)
    # File offsets of all sections with actual raw data, after alignment.
    rawDataPointers = [self.adjust_FileAlignment(s.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment) for s in self.sections if s.PointerToRawData > 0]
    if len(rawDataPointers) > 0:
        lowest_section_offset = min(rawDataPointers)
    else:
        lowest_section_offset = None
    # The "header" blob covers everything before the first section's data.
    if not lowest_section_offset or lowest_section_offset < offset:
        self.header = self.__data__[:offset]
    else:
        self.header = self.__data__[:lowest_section_offset]
    if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
        ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
        if ep_offset > len(self.__data__):
            self._PE__warnings.append('Possibly corrupt file. AddressOfEntryPoint lies outside the file. AddressOfEntryPoint: 0x%x' % self.OPTIONAL_HEADER.AddressOfEntryPoint)
    else:
        self._PE__warnings.append("AddressOfEntryPoint lies outside the sections' boundaries. AddressOfEntryPoint: 0x%x" % self.OPTIONAL_HEADER.AddressOfEntryPoint)
    if not fast_load:
        self.full_load()
def parse_rich_header(self):
    """Parses the rich header
    see http://www.ntcore.com/files/richsign.htm for more information
    Structure:
    00 DanS ^ checksum, checksum, checksum, checksum
    10 Symbol RVA ^ checksum, Symbol size ^ checksum...
    ...
    XX Rich, checksum, 0, 0,...

    Returns a dict with keys 'key', 'raw_data', 'clear_data', 'checksum'
    and 'values', or None when no valid Rich header is present.
    """
    DANS = 1399742788  # 'DanS' as a little-endian dword
    RICH = 1751345490  # 'Rich' as a little-endian dword
    # The Rich header lives between the DOS stub (offset 128) and the
    # start of the NT headers.
    rich_index = self.__data__.find(b'Rich', 128, self.OPTIONAL_HEADER.get_file_offset())
    if rich_index == -1:
        return
    try:
        # Read from the stub up to and including the 'Rich' marker + key.
        rich_data = self.get_data(128, rich_index + 8)
        # Truncate to a whole number of 32-bit words.
        rich_data = rich_data[:4 * int(len(rich_data) / 4)]
        data = list(struct.unpack(('<{0}I').format(int(len(rich_data) / 4)), rich_data))
        if RICH not in data:
            return
    except PEFormatError:
        return
    # The XOR key is the dword immediately after the 'Rich' marker.
    key = struct.pack('<L', data[data.index(RICH) + 1])
    result = {'key': key}
    raw_data = rich_data[:rich_data.find(b'Rich')]
    result['raw_data'] = raw_data
    # py2/py3 shim: indexing bytes yields str on py2 but int on py3.
    ord_ = lambda c: ord(c) if not isinstance(c, int) else c
    clear_data = bytearray()
    for i in range(len(raw_data)):
        # Decrypt by XOR-ing each byte with the repeating 4-byte key.
        clear_data.append(ord_(raw_data[i]) ^ ord_(key[i % len(key)]))
    result['clear_data'] = bytes(clear_data)
    checksum = data[1]
    # Sanity check: word 0 is 'DanS' ^ checksum, words 2-3 repeat the checksum.
    if data[0] ^ checksum != DANS or data[2] != checksum or data[3] != checksum:
        return
    result['checksum'] = checksum
    headervalues = []
    result['values'] = headervalues
    data = data[4:]
    # Entries are (comp.id, count) dword pairs, each XOR-ed with the checksum.
    for i in range(int(len(data) / 2)):
        if data[2 * i] == RICH:
            # The closing marker must be followed by the checksum dword.
            if data[2 * i + 1] != checksum:
                self._PE__warnings.append('Rich Header is malformed')
            break
        headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum]
    return result
def get_warnings(self):
    """Return the list of warnings.

    Non-critical problems found while parsing are accumulated in a list;
    this accessor returns that list (the live object, not a copy).
    """
    warnings = self._PE__warnings
    return warnings
def show_warnings(self):
    """Print the list of warnings.

    Writes every accumulated parse warning to standard output, one per
    line, prefixed with '>'.
    """
    for message in self._PE__warnings:
        print('>', message)
def full_load(self):
    """Process the data directories.

    Loads the data directories that were skipped when the instance was
    created with the "fast_load" option, then parses the Rich header and
    publishes it as the RICH_HEADER attribute (or None).
    """
    self.parse_data_directories()

    class RichHeader(object):
        pass

    parsed = self.parse_rich_header()
    if not parsed:
        self.RICH_HEADER = None
        return
    header = RichHeader()
    # Copy each parsed field onto the result object, defaulting to None.
    for field in ('checksum', 'values', 'key', 'raw_data', 'clear_data'):
        setattr(header, field, parsed.get(field, None))
    self.RICH_HEADER = header
def write(self, filename=None):
    """Write the PE file.

    This function will process all headers and components
    of the PE file and include all changes made (by just
    assigning to attributes in the PE objects) and write
    the changes back to a file whose name is provided as
    an argument. The filename is optional, if not
    provided the data will be returned as a 'str' object.
    """
    # Start from the raw file image and overlay every parsed structure so
    # that attribute edits made on the structures are reflected.
    file_data = bytearray(self.__data__)
    for structure in self.__structures__:
        struct_data = bytearray(structure.__pack__())
        offset = structure.get_file_offset()
        file_data[offset:offset + len(struct_data)] = struct_data
    # Re-encode any edited version-information strings in place.
    if hasattr(self, 'VS_VERSIONINFO') and hasattr(self, 'FileInfo'):
        # Renamed the outer loop variable (was also 'entry', shadowed by the
        # inner key/value loop) for clarity; behavior unchanged.
        for fileinfo_entry in self.FileInfo:
            if not hasattr(fileinfo_entry, 'StringTable'):
                continue
            for st_entry in fileinfo_entry.StringTable:
                for key, entry in list(st_entry.entries.items()):
                    offsets = st_entry.entries_offsets[key]
                    lengths = st_entry.entries_lengths[key]
                    if len(entry) > lengths[1]:
                        # New value longer than the original slot:
                        # truncate to the slot's UTF-16 capacity.
                        l = entry.decode('utf-8').encode('utf-16le')
                        file_data[offsets[1]:offsets[1] + lengths[1] * 2] = l[:lengths[1] * 2]
                    else:
                        encoded_data = entry.decode('utf-8').encode('utf-16le')
                        file_data[offsets[1]:offsets[1] + len(encoded_data)] = encoded_data
    new_file_data = file_data
    if not filename:
        return new_file_data
    # BUGFIX: use a context manager so the handle is closed even if the
    # write raises (the original leaked the file object on error).
    with open(filename, 'wb+') as f:
        f.write(new_file_data)
def parse_sections(self, offset):
    """Fetch the PE file sections.
    The sections will be readily available in the "sections" attribute.
    Its attributes will contain all the section information plus "data"
    a buffer containing the section's data.
    The "Characteristics" member will be processed and attributes
    representing the section characteristics (with the 'IMAGE_SCN_'
    string trimmed from the constant's names) will be added to the
    section instance.
    Refer to the SectionStructure class for additional info.
    """
    # NOTE(review): decompiled output; block nesting reconstructed from
    # statement order -- verify against the original bytecode.
    self.sections = []
    # Abort a single section once this many suspicious findings pile up.
    MAX_SIMULTANEOUS_ERRORS = 3
    for i in range(self.FILE_HEADER.NumberOfSections):
        simultaneous_errors = 0
        section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__, pe=self)
        if not section:
            break
        section_offset = offset + section.sizeof() * i
        section.set_file_offset(section_offset)
        section_data = self.__data__[section_offset:section_offset + section.sizeof()]
        if count_zeroes(section_data) == section.sizeof():
            self._PE__warnings.append(('Invalid section {0}. Contents are null-bytes.').format(i))
            break
        if len(section_data) == 0:
            self._PE__warnings.append(("Invalid section {0}. No data in the file (is this corkami's virtsectblXP?).").format(i))
            break
        section.__unpack__(section_data)
        self.__structures__.append(section)
        # Series of sanity checks; each one only warns and bumps the
        # simultaneous-error counter.
        if section.SizeOfRawData + section.PointerToRawData > len(self.__data__):
            simultaneous_errors += 1
            self._PE__warnings.append(('Error parsing section {0}. SizeOfRawData is larger than file.').format(i))
        if self.adjust_FileAlignment(section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment) > len(self.__data__):
            simultaneous_errors += 1
            self._PE__warnings.append(('Error parsing section {0}. PointerToRawData points beyond the end of the file.').format(i))
        if section.Misc_VirtualSize > 268435456:
            simultaneous_errors += 1
            self._PE__warnings.append(('Suspicious value found parsing section {0}. VirtualSize is extremely large > 256MiB.').format(i))
        if self.adjust_SectionAlignment(section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment) > 268435456:
            simultaneous_errors += 1
            self._PE__warnings.append(('Suspicious value found parsing section {0}. VirtualAddress is beyond 0x10000000.').format(i))
        if self.OPTIONAL_HEADER.FileAlignment != 0:
            if section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment != 0:
                simultaneous_errors += 1
                self._PE__warnings.append(('Error parsing section {0}. PointerToRawData should normally be a multiple of FileAlignment, this might imply the file is trying to confuse tools which parse this incorrectly.').format(i))
        if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS:
            self._PE__warnings.append('Too many warnings parsing section. Aborting.')
            break
        section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
        # Expose each section Characteristics bit as a named attribute.
        set_flags(section, section.Characteristics, section_flags)
        # NOTE(review): the WRITE/EXECUTE combination check below was
        # mangled by decompilation (the stray 'pass' statements and the
        # PAGE/is_driver exemption no longer gate the warning) -- verify
        # against the original bytecode.
        if section.__dict__.get('IMAGE_SCN_MEM_WRITE', False):
            pass
            if section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False):
                # NOTE(review): section.Name is bytes on py3, so comparing
                # with the str 'PAGE' is always False here -- confirm.
                if section.Name == 'PAGE':
                    if self.is_driver():
                        pass
                self._PE__warnings.append('Suspicious flags set for section %d. ' % i + 'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. This might indicate a packed executable.')
        self.sections.append(section)
    # Keep sections ordered by their virtual address and link each one to
    # the start of the next, which bounds the section's virtual range.
    self.sections.sort(key=lambda a: a.VirtualAddress)
    for idx, section in enumerate(self.sections):
        if idx == len(self.sections) - 1:
            section.next_section_virtual_address = None
        else:
            section.next_section_virtual_address = self.sections[idx + 1].VirtualAddress
    # Return the file offset just past the section table.
    if self.FILE_HEADER.NumberOfSections > 0:
        if self.sections:
            return offset + self.sections[0].sizeof() * self.FILE_HEADER.NumberOfSections
    return offset
def parse_data_directories(self, directories=None, forwarded_exports_only=False, import_dllnames_only=False):
    """Parse and process the PE file's data directories.
    If the optional argument 'directories' is given, only
    the directories at the specified indexes will be parsed.
    Such functionality allows parsing of areas of interest
    without the burden of having to parse all others.
    The directories can then be specified as:
    For export / import only:
    directories = [ 0, 1 ]
    or (more verbosely):
    directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
    DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]
    If 'directories' is a list, the ones that are processed will be removed,
    leaving only the ones that are not present in the image.
    If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT
    attribute will only contain exports that are forwarded to another DLL.
    If `import_dllnames_only` is True, symbols will not be parsed from
    the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT
    attribute will not have a `symbols` attribute.
    """
    # (directory name, parser method) in the order they are processed.
    directory_parsing = (
        ('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
        ('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
        ('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
        ('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
        ('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
        ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
        ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
        ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports))
    # Normalize a single index into a one-element list.
    if directories is not None and not isinstance(directories, (tuple, list)):
        directories = [directories]
    for entry in directory_parsing:
        try:
            directory_index = DIRECTORY_ENTRY[entry[0]]
            dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
        except IndexError:
            break
        # Only process all the directories if no individual ones were chosen.
        if directories is None or directory_index in directories:
            if dir_entry.VirtualAddress:
                # BUGFIX: these three cases must be mutually exclusive.  The
                # decompiled code lost the elif chaining, so with
                # forwarded_exports_only set the export directory was parsed
                # with forwarded_only=True and then immediately re-parsed
                # (and overwritten) by the unconditional else branch.
                if forwarded_exports_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_EXPORT':
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size, forwarded_only=True)
                elif import_dllnames_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_IMPORT':
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size, dllnames_only=True)
                else:
                    value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
                if value:
                    # e.g. 'IMAGE_DIRECTORY_ENTRY_TLS' -> attribute 'DIRECTORY_ENTRY_TLS'
                    setattr(self, entry[0][6:], value)
        # NOTE(review): membership is tested on the directory *name* while
        # the *index* is removed, as in the original -- kept as-is to
        # preserve behavior.
        if directories is not None and isinstance(directories, list) and entry[0] in directories:
            directories.remove(directory_index)
def parse_directory_bound_imports(self, rva, size):
    """Walk the bound-import directory and return a list of
    BoundImportDescData objects (each carrying its forwarder refs),
    or None when the directory cannot be parsed.
    """
    # NOTE(review): decompiled output; nesting reconstructed from statement
    # order -- verify against the original bytecode.
    bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
    bnd_descr_size = bnd_descr.sizeof()
    # Module-name offsets in the descriptors are relative to the start.
    start = rva
    bound_imports = []
    while 1:
        bnd_descr = self.__unpack_data__(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
            self.__data__[rva:rva + bnd_descr_size],
            file_offset=rva)
        if bnd_descr is None:
            self._PE__warnings.append("The Bound Imports directory exists but can't be parsed.")
            return
        # An all-zero descriptor terminates the list.
        if bnd_descr.all_zeroes():
            break
        rva += bnd_descr.sizeof()
        section = self.get_section_by_offset(rva)
        file_offset = self.get_offset_from_rva(rva)
        if section is None:
            # Not inside any section: bound the read by the end of file, or
            # by the start of the nearest following section if one exists.
            safety_boundary = len(self.__data__) - file_offset
            sections_after_offset = [s.PointerToRawData for s in self.sections if s.PointerToRawData > file_offset]
            if sections_after_offset:
                first_section_after_offset = min(sections_after_offset)
                section = self.get_section_by_offset(first_section_after_offset)
                if section is not None:
                    safety_boundary = section.PointerToRawData - file_offset
        else:
            # Inside a section: bound the read by that section's data end.
            safety_boundary = section.PointerToRawData + len(section.get_data()) - file_offset
        if not section:
            self._PE__warnings.append(('RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points to an invalid address: {0:x}').format(rva))
            return
        forwarder_refs = []
        # Each forwarder ref record is 8 bytes; never read past the boundary.
        for idx in range(min(bnd_descr.NumberOfModuleForwarderRefs, int(safety_boundary / 8))):
            bnd_frwd_ref = self.__unpack_data__(self.__IMAGE_BOUND_FORWARDER_REF_format__,
                self.__data__[rva:rva + bnd_descr_size],
                file_offset=rva)
            if not bnd_frwd_ref:
                raise PEFormatError('IMAGE_BOUND_FORWARDER_REF cannot be read')
            rva += bnd_frwd_ref.sizeof()
            offset = start + bnd_frwd_ref.OffsetModuleName
            name_str = self.get_string_from_data(0, self.__data__[offset:offset + MAX_STRING_LENGTH])
            if name_str:
                # Reject implausible module names (too long / non-printable).
                invalid_chars = [c for c in bytearray(name_str) if chr(c) not in string.printable]
                if len(name_str) > 256 or invalid_chars:
                    break
            forwarder_refs.append(BoundImportRefData(struct=bnd_frwd_ref,
                name=name_str))
        offset = start + bnd_descr.OffsetModuleName
        name_str = self.get_string_from_data(0, self.__data__[offset:offset + MAX_STRING_LENGTH])
        if name_str:
            # Same plausibility check for the descriptor's own module name.
            invalid_chars = [c for c in bytearray(name_str) if chr(c) not in string.printable]
            if len(name_str) > 256 or invalid_chars:
                break
        if not name_str:
            break
        bound_imports.append(BoundImportDescData(struct=bnd_descr,
            name=name_str,
            entries=forwarder_refs))
    return bound_imports
def parse_directory_tls(self, rva, size):
    """Parse the TLS directory at *rva*.

    Returns a TlsData wrapper around the unpacked structure, or None when
    the data cannot be read or parsed (a warning is recorded instead).
    """
    # Select the layout matching the image's bitness.
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        tls_format = self.__IMAGE_TLS_DIRECTORY64_format__
    else:
        tls_format = self.__IMAGE_TLS_DIRECTORY_format__
    try:
        raw = self.get_data(rva, Structure(tls_format).sizeof())
        tls_struct = self.__unpack_data__(tls_format, raw, file_offset=self.get_offset_from_rva(rva))
    except PEFormatError:
        self._PE__warnings.append("Invalid TLS information. Can't read data at RVA: 0x%x" % rva)
        tls_struct = None
    return TlsData(struct=tls_struct) if tls_struct else None
def parse_directory_load_config(self, rva, size):
    """Parse the LOAD_CONFIG directory at *rva*.

    Returns a LoadConfigData wrapper, or None when the image type is
    unknown or the data cannot be read (a warning is recorded instead).
    """
    # Select the layout matching the image's bitness.
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        cfg_format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__
    elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        cfg_format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__
    else:
        self._PE__warnings.append("Don't know how to parse LOAD_CONFIG information for non-PE32/PE32+ file")
        return None
    load_config_struct = None
    try:
        raw = self.get_data(rva, Structure(cfg_format).sizeof())
        load_config_struct = self.__unpack_data__(cfg_format, raw, file_offset=self.get_offset_from_rva(rva))
    except PEFormatError:
        self._PE__warnings.append("Invalid LOAD_CONFIG information. Can't read data at RVA: 0x%x" % rva)
    if load_config_struct:
        return LoadConfigData(struct=load_config_struct)
    return None
def parse_relocations_directory(self, rva, size):
    """Walk the base-relocation directory.

    Iterates the IMAGE_BASE_RELOCATION blocks between *rva* and
    *rva + size*, parsing each block's entries; returns a list of
    BaseRelocationData objects.  Malformed blocks stop the walk with a
    recorded warning.
    """
    header_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof()
    end = rva + size
    relocations = []
    while rva < end:
        try:
            block = self.__unpack_data__(self.__IMAGE_BASE_RELOCATION_format__,
                self.get_data(rva, header_size),
                file_offset=self.get_offset_from_rva(rva))
        except PEFormatError:
            self._PE__warnings.append("Invalid relocation information. Can't read data at RVA: 0x%x" % rva)
            block = None
        if not block:
            break
        # Sanity-check the block header against the declared image size.
        if block.VirtualAddress > self.OPTIONAL_HEADER.SizeOfImage:
            self._PE__warnings.append('Invalid relocation information. VirtualAddress outside of Image: 0x%x' % block.VirtualAddress)
            break
        if block.SizeOfBlock > self.OPTIONAL_HEADER.SizeOfImage:
            self._PE__warnings.append('Invalid relocation information. SizeOfBlock too large: %d' % block.SizeOfBlock)
            break
        # The entries follow the header within the same block.
        block_entries = self.parse_relocations(rva + header_size, block.VirtualAddress, block.SizeOfBlock - header_size)
        relocations.append(BaseRelocationData(struct=block,
            entries=block_entries))
        # A zero-sized block would loop forever; bail out.
        if not block.SizeOfBlock:
            break
        rva += block.SizeOfBlock
    return relocations
def parse_relocations(self, data_rva, rva, size):
    """Parse the 16-bit relocation entries of one base-relocation block.

    *data_rva* points at the entry words, *rva* is the block's page base
    (added to each entry's 12-bit offset), *size* is the entries' byte
    length.  Returns a list of RelocationData objects.
    """
    try:
        data = self.get_data(data_rva, size)
        file_offset = self.get_offset_from_rva(data_rva)
    except PEFormatError as excp:
        self._PE__warnings.append('Bad RVA in relocation data: 0x%x' % data_rva)
        return []
    entries = []
    # Recently-seen (offset, type) pairs, newest first; bounded to 1000 so
    # the duplicate scan on a malicious file stays cheap.
    offsets_and_type = []
    for idx in range(int(len(data) / 2)):
        entry = self.__unpack_data__(self.__IMAGE_BASE_RELOCATION_ENTRY_format__,
            data[idx * 2:(idx + 1) * 2],
            file_offset=file_offset)
        if not entry:
            break
        # Each entry word: high 4 bits = relocation type, low 12 = offset
        # within the block's 4KiB page.
        word = entry.Data
        reloc_type = word >> 12
        reloc_offset = word & 4095
        # A repeated (offset, type) pair indicates a malformed/looping
        # relocation table; stop parsing this block.
        if (reloc_offset, reloc_type) in offsets_and_type:
            self._PE__warnings.append('Overlapping offsets in relocation data data at RVA: 0x%x' % (reloc_offset + rva))
            break
        # Keep the window bounded: drop the oldest before inserting.
        if len(offsets_and_type) >= 1000:
            offsets_and_type.pop()
        offsets_and_type.insert(0, (reloc_offset, reloc_type))
        entries.append(RelocationData(struct=entry,
            type=reloc_type,
            base_rva=rva,
            rva=reloc_offset + rva))
        file_offset += entry.sizeof()
    return entries
def parse_debug_directory(self, rva, size):
    """Parse the debug directory.

    Returns a list of DebugData objects; CodeView (PDB 7.0 'RSDS' /
    PDB 2.0 'NB10') and MISC payloads are decoded into the 'entry'
    attribute when recognized.  Returns None on read/parse failure.
    """
    # NOTE(review): decompiled output; nesting reconstructed from statement
    # order -- verify against the original bytecode.
    dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof()
    debug = []
    for idx in range(int(size / dbg_size)):
        try:
            data = self.get_data(rva + dbg_size * idx, dbg_size)
        except PEFormatError as e:
            self._PE__warnings.append("Invalid debug information. Can't read data at RVA: 0x%x" % rva)
            return
        dbg = self.__unpack_data__(self.__IMAGE_DEBUG_DIRECTORY_format__,
            data,
            file_offset=self.get_offset_from_rva(rva + dbg_size * idx))
        if not dbg:
            return
        dbg_type = None
        if dbg.Type == 1:
            # Type 1 (COFF): no further decoding performed here.
            pass
        else:
            if dbg.Type == 2:
                # Type 2 (CODEVIEW): payload starts with a 4-byte signature.
                dbg_type_offset = dbg.PointerToRawData
                dbg_type_size = dbg.SizeOfData
                dbg_type_data = self.__data__[dbg_type_offset:dbg_type_offset + dbg_type_size]
                if dbg_type_data[:4] == b'RSDS':
                    # PDB 7.0 record.
                    __CV_INFO_PDB70_format__ = ['CV_INFO_PDB70',
                     [
                      'I,CvSignature',
                      'I,Signature_Data1',
                      'H,Signature_Data2',
                      'H,Signature_Data3',
                      'H,Signature_Data4',
                      'H,Signature_Data5',
                      'I,Signature_Data6',
                      'I,Age']]
                    # Whatever trails the fixed fields is the PDB path;
                    # append a variable-length field for it.
                    pdbFileName_size = dbg_type_size - Structure(__CV_INFO_PDB70_format__).sizeof()
                    if pdbFileName_size > 0:
                        __CV_INFO_PDB70_format__[1].append(('{0}s,PdbFileName').format(pdbFileName_size))
                    dbg_type = self.__unpack_data__(__CV_INFO_PDB70_format__, dbg_type_data, dbg_type_offset)
                else:
                    if dbg_type_data[:4] == b'NB10':
                        # PDB 2.0 record.
                        __CV_INFO_PDB20_format__ = ['CV_INFO_PDB20',
                         [
                          'I,CvHeaderSignature',
                          'I,CvHeaderOffset',
                          'I,Signature',
                          'I,Age']]
                        pdbFileName_size = dbg_type_size - Structure(__CV_INFO_PDB20_format__).sizeof()
                        if pdbFileName_size > 0:
                            __CV_INFO_PDB20_format__[1].append(('{0}s,PdbFileName').format(pdbFileName_size))
                        dbg_type = self.__unpack_data__(__CV_INFO_PDB20_format__, dbg_type_data, dbg_type_offset)
            else:
                if dbg.Type == 4:
                    # Type 4 (MISC).
                    dbg_type_offset = dbg.PointerToRawData
                    dbg_type_size = dbg.SizeOfData
                    dbg_type_data = self.__data__[dbg_type_offset:dbg_type_offset + dbg_type_size]
                    ___IMAGE_DEBUG_MISC_format__ = ['IMAGE_DEBUG_MISC',
                     [
                      'I,DataType',
                      'I,Length',
                      'B,Unicode',
                      'B,Reserved1',
                      'H,Reserved2']]
                    # First pass: fixed header only, to read the Unicode flag.
                    dbg_type_partial = self.__unpack_data__(___IMAGE_DEBUG_MISC_format__, dbg_type_data, dbg_type_offset)
                    if dbg_type_partial:
                        if dbg_type_partial.Unicode in (0, 1):
                            data_size = dbg_type_size - Structure(___IMAGE_DEBUG_MISC_format__).sizeof()
                            if data_size > 0:
                                ___IMAGE_DEBUG_MISC_format__[1].append(('{0}s,Data').format(data_size))
                            # Second pass with the variable payload included.
                            dbg_type = self.__unpack_data__(___IMAGE_DEBUG_MISC_format__, dbg_type_data, dbg_type_offset)
        debug.append(DebugData(struct=dbg,
            entry=dbg_type))
    return debug
def parse_resources_directory(self, rva, size=0, base_rva=None, level=0, dirs=None):
    """Parse the resources directory.
    Given the RVA of the resources directory, it will process all
    its entries.
    The root will have the corresponding member of its structure,
    IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
    entries in the directory.
    Those entries will have, correspondingly, all the structure's
    members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
    "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
    representing upper layers of the tree. This one will also have
    an 'entries' attribute, pointing to the 3rd, and last, level.
    Another directory with more entries. Those last entries will
    have a new attribute (both 'leaf' or 'data_entry' can be used to
    access it). This structure finally points to the resource data.
    All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
    are available as its attributes.
    """
    # NOTE(review): decompiled output; nesting reconstructed from statement
    # order -- verify against the original bytecode.
    if dirs is None:
        # Track visited directory RVAs so a self-referencing resource tree
        # cannot cause unbounded recursion.
        dirs = [rva]
    if base_rva is None:
        base_rva = rva
    resources_section = self.get_section_by_rva(rva)
    try:
        data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof())
    except PEFormatError as e:
        self._PE__warnings.append("Invalid resources directory. Can't read directory data at RVA: 0x%x" % rva)
        return
    resource_dir = self.__unpack_data__(self.__IMAGE_RESOURCE_DIRECTORY_format__,
        data, file_offset=self.get_offset_from_rva(rva))
    if resource_dir is None:
        self._PE__warnings.append("Invalid resources directory. Can't parse directory data at RVA: 0x%x" % rva)
        return
    else:
        dir_entries = []
        # Entries immediately follow the directory header.
        rva += resource_dir.sizeof()
        number_of_entries = resource_dir.NumberOfNamedEntries + resource_dir.NumberOfIdEntries
        # Hard cap against malformed files declaring absurd entry counts.
        MAX_ALLOWED_ENTRIES = 4096
        if number_of_entries > MAX_ALLOWED_ENTRIES:
            self._PE__warnings.append('Error parsing the resources directory. The directory contains %d entries (>%s)' % (
             number_of_entries, MAX_ALLOWED_ENTRIES))
            return
        strings_to_postprocess = list()
        last_name_begin_end = None
        for idx in range(number_of_entries):
            res = self.parse_resource_entry(rva)
            if res is None:
                self._PE__warnings.append('Error parsing the resources directory, Entry %d is invalid, RVA = 0x%x. ' % (
                 idx, rva))
                break
            entry_name = None
            entry_id = None
            # Top bit of Name selects between a numeric ID and a string name.
            name_is_string = (res.Name & 2147483648) >> 31
            if not name_is_string:
                entry_id = res.Name
            else:
                ustr_offset = base_rva + res.NameOffset
                try:
                    # Names are resolved lazily once all entries are known.
                    entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
                    # Overlapping name strings indicate a malformed tree.
                    if last_name_begin_end:
                        if last_name_begin_end[0] < ustr_offset:
                            if last_name_begin_end[1] >= ustr_offset:
                                strings_to_postprocess.pop()
                                self._PE__warnings.append('Error parsing the resources directory, attempting to read entry name. Entry names overlap 0x%x' % ustr_offset)
                                break
                    last_name_begin_end = (
                     ustr_offset, ustr_offset + entry_name.get_pascal_16_length())
                    strings_to_postprocess.append(entry_name)
                except PEFormatError as excp:
                    self._PE__warnings.append("Error parsing the resources directory, attempting to read entry name. Can't read unicode string at offset 0x%x" % ustr_offset)
            if res.DataIsDirectory:
                # Stop if this subdirectory was already visited (loop).
                if base_rva + res.OffsetToDirectory in dirs:
                    break
                else:
                    entry_directory = self.parse_resources_directory(base_rva + res.OffsetToDirectory,
                        size - (rva - base_rva),
                        base_rva=base_rva,
                        level=level + 1,
                        dirs=dirs + [base_rva + res.OffsetToDirectory])
                if not entry_directory:
                    break
                strings = None
                # For RT_STRING resources, decode the string tables under
                # each (id, language) leaf.
                if entry_id == RESOURCE_TYPE['RT_STRING']:
                    strings = dict()
                    for resource_id in entry_directory.entries:
                        if hasattr(resource_id, 'directory'):
                            resource_strings = dict()
                            for resource_lang in resource_id.directory.entries:
                                if not resource_lang is None:
                                    if not not hasattr(resource_lang, 'data'):
                                        if not resource_lang.data.struct.Size is None:
                                            if resource_id.id is None:
                                                pass
                                            else:
                                                string_entry_rva = resource_lang.data.struct.OffsetToData
                                                string_entry_size = resource_lang.data.struct.Size
                                                string_entry_id = resource_id.id
                                                try:
                                                    string_entry_data = self.get_data(string_entry_rva, string_entry_size)
                                                except:
                                                    self._PE__warnings.append('Error parsing resource of type RT_STRING at RVA 0x%x with size %d' % (
                                                     string_entry_rva, string_entry_size))
                                                    continue
                                                # String-table blocks hold 16 strings each;
                                                # (id - 1) * 16 is the block's base string index.
                                                parse_strings(string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings)
                                                strings.update(resource_strings)
                            resource_id.directory.strings = resource_strings
                dir_entries.append(ResourceDirEntryData(struct=res,
                    name=entry_name,
                    id=entry_id,
                    directory=entry_directory))
            else:
                # Leaf node: points at the actual resource data.
                struct = self.parse_resource_data_entry(base_rva + res.OffsetToDirectory)
                if struct:
                    entry_data = ResourceDataEntryData(struct=struct,
                        lang=res.Name & 1023,
                        sublang=res.Name >> 10)
                    dir_entries.append(ResourceDirEntryData(struct=res,
                        name=entry_name,
                        id=entry_id,
                        data=entry_data))
                else:
                    break
            # At the tree root, eagerly decode RT_VERSION payloads.
            if level == 0:
                if res.Id == RESOURCE_TYPE['RT_VERSION']:
                    if len(dir_entries) > 0:
                        last_entry = dir_entries[-1]
                        rt_version_struct = None
                        try:
                            rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
                        except:
                            # Malformed version resource; silently skip.
                            pass
                        if rt_version_struct is not None:
                            self.parse_version_information(rt_version_struct)
            rva += res.sizeof()
        # Resolve the deferred unicode name strings.
        string_rvas = [s.get_rva() for s in strings_to_postprocess]
        string_rvas.sort()
        for idx, s in enumerate(strings_to_postprocess):
            s.render_pascal_16()
        resource_directory_data = ResourceDirData(struct=resource_dir,
            entries=dir_entries)
        return resource_directory_data
def parse_resource_data_entry(self, rva):
    """Parse a data entry from the resources directory.

    Returns the unpacked IMAGE_RESOURCE_DATA_ENTRY structure, or None if
    the RVA cannot be read (a warning is recorded instead).
    """
    entry_size = Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof()
    try:
        raw = self.get_data(rva, entry_size)
    except PEFormatError:
        self._PE__warnings.append('Error parsing a resource directory data entry, the RVA is invalid: 0x%x' % rva)
        return None
    return self.__unpack_data__(self.__IMAGE_RESOURCE_DATA_ENTRY_format__,
        raw, file_offset=self.get_offset_from_rva(rva))
def parse_resource_entry(self, rva):
    """Parse a directory entry from the resources directory.

    Unpacks an IMAGE_RESOURCE_DIRECTORY_ENTRY and splits its packed
    Name/OffsetToData words into their documented sub-fields.
    Returns None when the entry cannot be read or unpacked.
    """
    try:
        raw = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof())
    except PEFormatError:
        # A bad pointer means there is no entry to return.
        return None
    resource = self.__unpack_data__(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__,
                                    raw, file_offset=self.get_offset_from_rva(rva))
    if resource is None:
        return None
    # Name word: low 31 bits are a string offset, low 16 bits an integer id.
    resource.NameOffset = resource.Name & 0x7FFFFFFF
    resource._PE__pad = resource.Name & 0xFFFF0000
    resource.Id = resource.Name & 0xFFFF
    # OffsetToData word: high bit flags a sub-directory, the rest is the offset.
    resource.DataIsDirectory = (resource.OffsetToData & 0x80000000) >> 31
    resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFF
    return resource
def parse_version_information(self, version_struct):
    """Parse version information structure.
    The date will be made available in three attributes of the PE object.
    VS_VERSIONINFO will contain the first three fields of the main structure:
    'Length', 'ValueLength', and 'Type'
    VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
    'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
    'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
    'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
    FileInfo is a list of all StringFileInfo and VarFileInfo structures.
    StringFileInfo structures will have a list as an attribute named 'StringTable'
    containing all the StringTable structures. Each of those structures contains a
    dictionary 'entries' with all the key / value version information string pairs.
    VarFileInfo structures will have a list as an attribute named 'Var' containing
    all Var structures. Each Var structure will have a dictionary as an attribute
    named 'entry' which will contain the name and value of the Var.
    """
    # Slice the whole version resource blob out of the raw file data.
    start_offset = self.get_offset_from_rva(version_struct.OffsetToData)
    raw_data = self.__data__[start_offset:start_offset + version_struct.Size]
    versioninfo_struct = self.__unpack_data__(self.__VS_VERSIONINFO_format__,
        raw_data, file_offset=start_offset)
    if versioninfo_struct is None:
        return
    # The fixed header is followed by a UTF-16 key expected to read
    # "VS_VERSION_INFO"; cap the read at the end of the containing section.
    ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
    section = self.get_section_by_rva(ustr_offset)
    section_end = None
    if section:
        section_end = section.VirtualAddress + max(section.SizeOfRawData, section.Misc_VirtualSize)
    versioninfo_string = None
    try:
        if section_end is None:
            versioninfo_string = self.get_string_u_at_rva(ustr_offset,
                encoding='ascii')
        else:
            # Maximum length expressed in UTF-16 code units (hence the >> 1).
            versioninfo_string = self.get_string_u_at_rva(ustr_offset,
                section_end - ustr_offset >> 1, encoding='ascii')
    except PEFormatError as excp:
        self._PE__warnings.append("Error parsing the version information, attempting to read VS_VERSION_INFO string. Can't read unicode string at offset 0x%x" % ustr_offset)
    if versioninfo_string == None:
        self._PE__warnings.append(('Invalid VS_VERSION_INFO block: {0}').format(versioninfo_string))
        return
    if versioninfo_string is not None:
        if versioninfo_string != b'VS_VERSION_INFO':
            # Unexpected key: log a truncated printable excerpt and give up.
            if len(versioninfo_string) > 128:
                excerpt = versioninfo_string[:128].decode('ascii')
                # Cut at the last escape sequence so it is not left dangling.
                excerpt = excerpt[:excerpt.rfind('\\u')]
                versioninfo_string = b(('{0} ... ({1} bytes, too long to display)').format(excerpt, len(versioninfo_string)))
            self._PE__warnings.append(('Invalid VS_VERSION_INFO block: {0}').format(versioninfo_string.decode('ascii').replace('\x00', '\\00')))
            return
    self.VS_VERSIONINFO = versioninfo_struct
    self.VS_VERSIONINFO.Key = versioninfo_string
    # NOTE(review): unreachable in practice -- a None string already returned
    # above; kept as-is (decompilation artifact).
    if versioninfo_string is None:
        versioninfo_string = ''
    # VS_FIXEDFILEINFO follows the key, dword-aligned relative to the
    # start of the version resource.
    fixedfileinfo_offset = self.dword_align(versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1), version_struct.OffsetToData)
    fixedfileinfo_struct = self.__unpack_data__(self.__VS_FIXEDFILEINFO_format__,
        raw_data[fixedfileinfo_offset:],
        file_offset=start_offset + fixedfileinfo_offset)
    if not fixedfileinfo_struct:
        return
    self.VS_FIXEDFILEINFO = fixedfileinfo_struct
    # Child blocks (StringFileInfo / VarFileInfo) follow the fixed info.
    stringfileinfo_offset = self.dword_align(fixedfileinfo_offset + fixedfileinfo_struct.sizeof(), version_struct.OffsetToData)
    original_stringfileinfo_offset = stringfileinfo_offset
    self.FileInfo = list()
    while 1:
        stringfileinfo_struct = self.__unpack_data__(self.__StringFileInfo_format__,
            raw_data[stringfileinfo_offset:],
            file_offset=start_offset + stringfileinfo_offset)
        if stringfileinfo_struct is None:
            self._PE__warnings.append('Error parsing StringFileInfo/VarFileInfo struct')
            return
        # The block's UTF-16 key distinguishes StringFileInfo from VarFileInfo.
        ustr_offset = version_struct.OffsetToData + stringfileinfo_offset + versioninfo_struct.sizeof()
        try:
            stringfileinfo_string = self.get_string_u_at_rva(ustr_offset)
        except PEFormatError as excp:
            self._PE__warnings.append(("Error parsing the version information, attempting to read StringFileInfo string. Can't read unicode string at offset 0x{0:x}").format(ustr_offset))
            break
        stringfileinfo_struct.Key = stringfileinfo_string
        self.FileInfo.append(stringfileinfo_struct)
        if stringfileinfo_string:
            if stringfileinfo_string.startswith(b'StringFileInfo'):
                if stringfileinfo_struct.Type in (0, 1):
                    if stringfileinfo_struct.ValueLength == 0:
                        stringtable_offset = self.dword_align(stringfileinfo_offset + stringfileinfo_struct.sizeof() + 2 * (len(stringfileinfo_string) + 1), version_struct.OffsetToData)
                        stringfileinfo_struct.StringTable = list()
                        # Walk every StringTable inside this StringFileInfo.
                        while True:
                            stringtable_struct = self.__unpack_data__(self.__StringTable_format__,
                                raw_data[stringtable_offset:],
                                file_offset=start_offset + stringtable_offset)
                            if not stringtable_struct:
                                break
                            ustr_offset = version_struct.OffsetToData + stringtable_offset + stringtable_struct.sizeof()
                            try:
                                stringtable_string = self.get_string_u_at_rva(ustr_offset)
                            except PEFormatError as excp:
                                self._PE__warnings.append(("Error parsing the version information, attempting to read StringTable string. Can't read unicode string at offset 0x{0:x}").format(ustr_offset))
                                break
                            stringtable_struct.LangID = stringtable_string
                            stringtable_struct.entries = dict()
                            stringtable_struct.entries_offsets = dict()
                            stringtable_struct.entries_lengths = dict()
                            stringfileinfo_struct.StringTable.append(stringtable_struct)
                            # Key/value String entries follow the table header.
                            entry_offset = self.dword_align(stringtable_offset + stringtable_struct.sizeof() + 2 * (len(stringtable_string) + 1), version_struct.OffsetToData)
                            while entry_offset < stringtable_offset + stringtable_struct.Length:
                                string_struct = self.__unpack_data__(self.__String_format__,
                                    raw_data[entry_offset:], file_offset=start_offset + entry_offset)
                                if not string_struct:
                                    break
                                ustr_offset = version_struct.OffsetToData + entry_offset + string_struct.sizeof()
                                try:
                                    key = self.get_string_u_at_rva(ustr_offset)
                                    key_offset = self.get_offset_from_rva(ustr_offset)
                                except PEFormatError as excp:
                                    self._PE__warnings.append(("Error parsing the version information, attempting to read StringTable Key string. Can't read unicode string at offset 0x{0:x}").format(ustr_offset))
                                    break
                                # The value is dword-aligned after the key string.
                                value_offset = self.dword_align(2 * (len(key) + 1) + entry_offset + string_struct.sizeof(), version_struct.OffsetToData)
                                ustr_offset = version_struct.OffsetToData + value_offset
                                try:
                                    value = self.get_string_u_at_rva(ustr_offset, max_length=string_struct.ValueLength)
                                    value_offset = self.get_offset_from_rva(ustr_offset)
                                except PEFormatError as excp:
                                    self._PE__warnings.append(("Error parsing the version information, attempting to read StringTable Value string. Can't read unicode string at offset 0x{0:x}").format(ustr_offset))
                                    break
                                # A zero Length would loop forever: jump straight
                                # to the end of the table instead.
                                if string_struct.Length == 0:
                                    entry_offset = stringtable_offset + stringtable_struct.Length
                                else:
                                    entry_offset = self.dword_align(string_struct.Length + entry_offset, version_struct.OffsetToData)
                                stringtable_struct.entries[key] = value
                                stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
                                stringtable_struct.entries_lengths[key] = (len(key), len(value))
                            # No forward progress means a corrupt Length field.
                            new_stringtable_offset = self.dword_align(stringtable_struct.Length + stringtable_offset, version_struct.OffsetToData)
                            if new_stringtable_offset == stringtable_offset:
                                break
                            stringtable_offset = new_stringtable_offset
                            if stringtable_offset >= stringfileinfo_struct.Length:
                                break
            else:
                if stringfileinfo_string:
                    if stringfileinfo_string.startswith(b'VarFileInfo'):
                        varfileinfo_struct = stringfileinfo_struct
                        varfileinfo_struct.name = 'VarFileInfo'
                        if varfileinfo_struct.Type in (0, 1):
                            if varfileinfo_struct.ValueLength == 0:
                                var_offset = self.dword_align(stringfileinfo_offset + varfileinfo_struct.sizeof() + 2 * (len(stringfileinfo_string) + 1), version_struct.OffsetToData)
                                varfileinfo_struct.Var = list()
                                # Walk every Var entry (typically "Translation").
                                while 1:
                                    var_struct = self.__unpack_data__(self.__Var_format__,
                                        raw_data[var_offset:],
                                        file_offset=start_offset + var_offset)
                                    if not var_struct:
                                        break
                                    ustr_offset = version_struct.OffsetToData + var_offset + var_struct.sizeof()
                                    try:
                                        var_string = self.get_string_u_at_rva(ustr_offset)
                                    except PEFormatError as excp:
                                        self._PE__warnings.append(("Error parsing the version information, attempting to read VarFileInfo Var string. Can't read unicode string at offset 0x{0:x}").format(ustr_offset))
                                        break
                                    if var_string is None:
                                        break
                                    varfileinfo_struct.Var.append(var_struct)
                                    # The value is a list of 16-bit word pairs.
                                    varword_offset = self.dword_align(2 * (len(var_string) + 1) + var_offset + var_struct.sizeof(), version_struct.OffsetToData)
                                    orig_varword_offset = varword_offset
                                    while varword_offset < orig_varword_offset + var_struct.ValueLength:
                                        word1 = self.get_word_from_data(raw_data[varword_offset:varword_offset + 2], 0)
                                        word2 = self.get_word_from_data(raw_data[varword_offset + 2:varword_offset + 4], 0)
                                        varword_offset += 4
                                        if isinstance(word1, int):
                                            # NOTE(review): the `and` stores the dict only
                                            # when word2 is an int, otherwise stores False
                                            # (decompilation artifact); kept as-is.
                                            var_struct.entry = isinstance(word2, int) and {var_string: '0x%04x 0x%04x' % (word1, word2)}
                                    var_offset = self.dword_align(var_offset + var_struct.Length, version_struct.OffsetToData)
                                    # NOTE(review): this condition is always true for a
                                    # non-negative Length, so only the first Var entry is
                                    # processed (decompilation artifact); kept as-is.
                                    if var_offset <= var_offset + var_struct.Length:
                                        break
        # Advance to the next StringFileInfo/VarFileInfo block and stop once
        # the declared VS_VERSIONINFO length is exhausted.
        stringfileinfo_offset = self.dword_align(stringfileinfo_struct.Length + stringfileinfo_offset, version_struct.OffsetToData)
        if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
            pass
            break
def parse_export_directory(self, rva, size, forwarded_only=False):
    """Parse the export directory.
    Given the RVA of the export directory, it will process all
    its entries.
    The exports will be made available as a list of ExportData
    instances in the 'IMAGE_DIRECTORY_ENTRY_EXPORT' PE attribute.

    Fixes applied to the (decompiled) control flow:
      - forwarder string/offset are reset only when the symbol does NOT
        point back into the export directory (they were previously
        overwritten with None unconditionally);
      - `forwarded_only` now skips only non-forwarded symbols instead of
        skipping every entry;
      - removed a stray `return` that aborted the whole parse right after
        reading the first symbol address;
      - `safety_boundary` keeps its whole-file default when the table lies
        outside any section (it could previously become None and crash on
        `int(None / 4)`).
    """
    try:
        export_dir = self.__unpack_data__(self.__IMAGE_EXPORT_DIRECTORY_format__,
            self.get_data(rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof()),
            file_offset=self.get_offset_from_rva(rva))
    except PEFormatError:
        self._PE__warnings.append('Error parsing export directory at RVA: 0x%x' % rva)
        return
    if not export_dir:
        return

    def length_until_eof(rva):
        # Number of bytes left in the file past this RVA.
        return len(self.__data__) - self.get_offset_from_rva(rva)

    try:
        address_of_names = self.get_data(export_dir.AddressOfNames, min(length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames * 4))
        address_of_name_ordinals = self.get_data(export_dir.AddressOfNameOrdinals, min(length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames * 4))
        address_of_functions = self.get_data(export_dir.AddressOfFunctions, min(length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions * 4))
    except PEFormatError:
        self._PE__warnings.append('Error parsing export directory at RVA: 0x%x' % rva)
        return
    exports = []
    max_failed_entries_before_giving_up = 10
    # Never read past the end of the containing section (or of the file).
    section = self.get_section_by_rva(export_dir.AddressOfNames)
    safety_boundary = len(self.__data__)
    if section:
        safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfNames
    symbol_counter = Counter()
    export_parsing_loop_completed_normally = True
    # First pass: named exports.
    for i in range(min(export_dir.NumberOfNames, int(safety_boundary / 4))):
        symbol_ordinal = self.get_word_from_data(address_of_name_ordinals, i)
        if symbol_ordinal is not None and symbol_ordinal * 4 < len(address_of_functions):
            symbol_address = self.get_dword_from_data(address_of_functions, symbol_ordinal)
        else:
            # Corrupt ordinal table: assume the whole directory is useless.
            return
        if symbol_address is None or symbol_address == 0:
            continue
        if symbol_address >= rva and symbol_address < rva + size:
            # The address points back into the export directory: the entry is
            # a forwarder string, not code.
            forwarder_str = self.get_string_at_rva(symbol_address)
            try:
                forwarder_offset = self.get_offset_from_rva(symbol_address)
            except PEFormatError:
                continue
        else:
            if forwarded_only:
                continue
            forwarder_str = None
            forwarder_offset = None
        symbol_name_address = self.get_dword_from_data(address_of_names, i)
        if symbol_name_address is None:
            max_failed_entries_before_giving_up -= 1
            if max_failed_entries_before_giving_up <= 0:
                export_parsing_loop_completed_normally = False
                break
        symbol_name = self.get_string_at_rva(symbol_name_address, MAX_SYMBOL_NAME_LENGTH)
        if not is_valid_function_name(symbol_name):
            export_parsing_loop_completed_normally = False
            break
        try:
            symbol_name_offset = self.get_offset_from_rva(symbol_name_address)
        except PEFormatError:
            max_failed_entries_before_giving_up -= 1
            if max_failed_entries_before_giving_up <= 0:
                export_parsing_loop_completed_normally = False
                break
            # Retry once before skipping the entry.
            try:
                symbol_name_offset = self.get_offset_from_rva(symbol_name_address)
            except PEFormatError:
                max_failed_entries_before_giving_up -= 1
                if max_failed_entries_before_giving_up <= 0:
                    export_parsing_loop_completed_normally = False
                    break
                continue
        # Bail out on heavily repeated (corrupt) entries.
        most_common = symbol_counter.most_common(1)
        if most_common and most_common[0][1] > 10:
            self._PE__warnings.append('Export directory contains more than 10 repeated entries. Assuming corrupt.')
            break
        symbol_counter[(symbol_name, symbol_address)] += 1
        exports.append(ExportData(pe=self,
            ordinal=export_dir.Base + symbol_ordinal,
            ordinal_offset=self.get_offset_from_rva(export_dir.AddressOfNameOrdinals + 2 * i),
            address=symbol_address,
            address_offset=self.get_offset_from_rva(export_dir.AddressOfFunctions + 4 * symbol_ordinal),
            name=symbol_name,
            name_offset=symbol_name_offset,
            forwarder=forwarder_str,
            forwarder_offset=forwarder_offset))
    if not export_parsing_loop_completed_normally:
        self._PE__warnings.append('RVA AddressOfNames in the export directory points to an invalid address: %x' % export_dir.AddressOfNames)
    # Second pass: exports that have only an ordinal (no name).
    ordinals = [exp.ordinal for exp in exports]
    max_failed_entries_before_giving_up = 10
    section = self.get_section_by_rva(export_dir.AddressOfFunctions)
    safety_boundary = len(self.__data__)
    if section:
        safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfFunctions
    symbol_counter = Counter()
    export_parsing_loop_completed_normally = True
    for idx in range(min(export_dir.NumberOfFunctions, int(safety_boundary / 4))):
        if idx + export_dir.Base in ordinals:
            # Already collected with a name in the first pass.
            continue
        try:
            symbol_address = self.get_dword_from_data(address_of_functions, idx)
        except PEFormatError:
            symbol_address = None
        if symbol_address is None:
            max_failed_entries_before_giving_up -= 1
            if max_failed_entries_before_giving_up <= 0:
                export_parsing_loop_completed_normally = False
                break
        if symbol_address == 0:
            continue
        if symbol_address is not None:
            if symbol_address >= rva and symbol_address < rva + size:
                forwarder_str = self.get_string_at_rva(symbol_address)
            else:
                forwarder_str = None
            most_common = symbol_counter.most_common(1)
            if most_common and most_common[0][1] > 10:
                self._PE__warnings.append('Export directory contains more than 10 repeated ordinal entries. Assuming corrupt.')
                break
            symbol_counter[symbol_address] += 1
            exports.append(ExportData(ordinal=export_dir.Base + idx,
                address=symbol_address,
                name=None,
                forwarder=forwarder_str))
    if not export_parsing_loop_completed_normally:
        self._PE__warnings.append('RVA AddressOfFunctions in the export directory points to an invalid address: %x' % export_dir.AddressOfFunctions)
    if not exports and export_dir.all_zeroes():
        return
    return ExportDirData(struct=export_dir, symbols=exports, name=self.get_string_at_rva(export_dir.Name))
def dword_align(self, offset, base):
    """Round `offset` up to the next 32-bit (4-byte) boundary, relative to `base`."""
    aligned = (offset + base + 3) & 0xFFFFFFFC
    return aligned - (base & 0xFFFFFFFC)
def parse_delay_import_directory(self, rva, size):
    """Walk and parse the delay import directory."""
    import_descs = []
    error_count = 0
    # Iterate descriptors until an all-zero terminator entry (or an error).
    while 1:
        try:
            data = self.get_data(rva, Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof())
        except PEFormatError as e:
            self._PE__warnings.append('Error parsing the Delay import directory at RVA: 0x%x' % rva)
            break
        file_offset = self.get_offset_from_rva(rva)
        import_desc = self.__unpack_data__(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
            data,
            file_offset=file_offset)
        if not import_desc or import_desc.all_zeroes():
            break
        rva += import_desc.sizeof()
        # Cap how much thunk data may be read for this descriptor.
        max_len = len(self.__data__) - file_offset
        if rva > import_desc.pINT or rva > import_desc.pIAT:
            max_len = max(rva - import_desc.pINT, rva - import_desc.pIAT)
        import_data = []
        try:
            # pINT / pIAT are the delay-load name and address tables.
            import_data = self.parse_imports(import_desc.pINT,
                import_desc.pIAT,
                None,
                max_length=max_len)
        except PEFormatError as e:
            self._PE__warnings.append(('Error parsing the Delay import directory. Invalid import data at RVA: 0x{0:x} ({1})').format(rva, e.value))
        if error_count > 5:
            self._PE__warnings.append(('Too may errors parsing the Delay import directory. Invalid import data at RVA: 0x{0:x}').format(rva))
            break
        if not import_data:
            error_count += 1
            continue
        dll = self.get_string_at_rva(import_desc.szName, MAX_DLL_LENGTH)
        if not is_valid_dos_filename(dll):
            dll = b('*invalid*')
        if dll:
            for symbol in import_data:
                if symbol.name is None:
                    # Try to recover the name from the per-DLL ordinal lookup tables.
                    funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
                    symbol.name = funcname and funcname
            import_descs.append(ImportDescData(struct=import_desc,
                imports=import_data,
                dll=dll))
    return import_descs
def get_imphash(self):
    """Return the import hash ("imphash") of this PE as a hex digest string.

    The hash is the MD5 of the comma-joined, lower-cased
    "dllname.funcname" pairs of every imported symbol. Returns the empty
    string when no import directory has been parsed.
    """
    if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        # No parsed import directory: empty hash by convention.
        return ''
    known_exts = ('ocx', 'sys', 'dll')
    pairs = []
    for entry in self.DIRECTORY_ENTRY_IMPORT:
        if isinstance(entry.dll, bytes):
            dll_name = entry.dll.decode().lower()
        else:
            dll_name = entry.dll.lower()
        # Drop a well-known module extension, keep unusual ones.
        stem, sep, ext = dll_name.rpartition('.')
        if sep and ext in known_exts:
            dll_name = stem
        for imp in entry.imports:
            if imp.name:
                func = imp.name
            else:
                # Ordinal-only import: resolve via the ordinal lookup tables.
                func = ordlookup.ordLookup(entry.dll.lower(), imp.ordinal, make_name=True)
                if not func:
                    raise Exception('Unable to look up ordinal %s:%04x' % (entry.dll, imp.ordinal))
            if not func:
                continue
            if isinstance(func, bytes):
                func = func.decode()
            pairs.append('%s.%s' % (dll_name.lower(), func.lower()))
    return md5((',').join(pairs).encode()).hexdigest()
def parse_import_directory(self, rva, size, dllnames_only=False):
    """Walk and parse the import directory."""
    import_descs = []
    error_count = 0
    # Iterate IMAGE_IMPORT_DESCRIPTOR entries until the all-zero terminator.
    while 1:
        try:
            data = self.get_data(rva, Structure(self.__IMAGE_IMPORT_DESCRIPTOR_format__).sizeof())
        except PEFormatError as e:
            self._PE__warnings.append('Error parsing the import directory at RVA: 0x%x' % rva)
            break
        file_offset = self.get_offset_from_rva(rva)
        import_desc = self.__unpack_data__(self.__IMAGE_IMPORT_DESCRIPTOR_format__,
            data,
            file_offset=file_offset)
        if not import_desc or import_desc.all_zeroes():
            break
        rva += import_desc.sizeof()
        # Cap how much thunk data may be read for this descriptor.
        max_len = len(self.__data__) - file_offset
        if rva > import_desc.OriginalFirstThunk or rva > import_desc.FirstThunk:
            max_len = max(rva - import_desc.OriginalFirstThunk, rva - import_desc.FirstThunk)
        import_data = []
        if not dllnames_only:
            try:
                import_data = self.parse_imports(import_desc.OriginalFirstThunk,
                    import_desc.FirstThunk,
                    import_desc.ForwarderChain,
                    max_length=max_len)
            except PEFormatError as e:
                self._PE__warnings.append(('Error parsing the import directory. Invalid Import data at RVA: 0x{0:x} ({1})').format(rva, e.value))
            if error_count > 5:
                self._PE__warnings.append(('Too may errors parsing the import directory. Invalid import data at RVA: 0x{0:x}').format(rva))
                break
            if not import_data:
                error_count += 1
                continue
        dll = self.get_string_at_rva(import_desc.Name, MAX_DLL_LENGTH)
        if not is_valid_dos_filename(dll):
            dll = b('*invalid*')
        if dll:
            for symbol in import_data:
                if symbol.name is None:
                    # Try to recover the name from the per-DLL ordinal lookup tables.
                    funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
                    symbol.name = funcname and funcname
            import_descs.append(ImportDescData(struct=import_desc,
                imports=import_data,
                dll=dll))
    # Heuristic: a tiny import table featuring both LoadLibrary and
    # GetProcAddress is typical of packed executables.
    if not dllnames_only:
        suspicious_imports = set(['LoadLibrary', 'GetProcAddress'])
        suspicious_imports_count = 0
        total_symbols = 0
        for imp_dll in import_descs:
            for symbol in imp_dll.imports:
                for suspicious_symbol in suspicious_imports:
                    if not not symbol:
                        if not symbol.name:
                            pass
                        else:
                            name = symbol.name
                            if type(symbol.name) == bytes:
                                name = symbol.name.decode('utf-8')
                            if name.startswith(suspicious_symbol):
                                pass
                                suspicious_imports_count += 1
                                break
                total_symbols += 1
        if suspicious_imports_count == len(suspicious_imports):
            if total_symbols < 20:
                self._PE__warnings.append('Imported symbols contain entries typical of packed executables.')
    return import_descs
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain, max_length=None):
    """Parse the imported symbols.
    It will fill a list, which will be available as the dictionary
    attribute "imports". Its keys will be the DLL names and the values
    all the symbols imported from that object.
    """
    imported_symbols = []
    # ILT = Import Lookup Table, IAT = Import Address Table.
    ilt = self.get_import_table(original_first_thunk, max_length)
    iat = self.get_import_table(first_thunk, max_length)
    if not iat or len(iat) == 0:
        if not ilt or len(ilt) == 0:
            self._PE__warnings.append(('Damaged Import Table information. ILT and/or IAT appear to be broken. OriginalFirstThunk: 0x{0:x} FirstThunk: 0x{1:x}').format(original_first_thunk, first_thunk))
            return []
    # Prefer the ILT; fall back to the IAT when the ILT is missing.
    table = None
    if ilt:
        table = ilt
    else:
        if iat:
            table = iat
        else:
            return
    # Entry width and address mask depend on PE32 vs PE32+ (64-bit).
    imp_offset = 4
    address_mask = 2147483647
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        ordinal_flag = IMAGE_ORDINAL_FLAG
    else:
        if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
            ordinal_flag = IMAGE_ORDINAL_FLAG64
            imp_offset = 8
            address_mask = 9223372036854775807
        else:
            # Unknown optional-header magic: assume 32-bit layout.
            ordinal_flag = IMAGE_ORDINAL_FLAG
    num_invalid = 0
    for idx in range(len(table)):
        imp_ord = None
        imp_hint = None
        imp_name = None
        name_offset = None
        hint_name_table_rva = None
        if table[idx].AddressOfData:
            if table[idx].AddressOfData & ordinal_flag:
                # High bit set: import by ordinal; low 16 bits hold it.
                import_by_ordinal = True
                imp_ord = table[idx].AddressOfData & 65535
                imp_name = None
                name_offset = None
            else:
                # Import by name: AddressOfData points at hint + name string.
                import_by_ordinal = False
                try:
                    hint_name_table_rva = table[idx].AddressOfData & address_mask
                    data = self.get_data(hint_name_table_rva, 2)
                    imp_hint = self.get_word_from_data(data, 0)
                    imp_name = self.get_string_at_rva(table[idx].AddressOfData + 2, MAX_IMPORT_NAME_LENGTH)
                    if not is_valid_function_name(imp_name):
                        imp_name = b('*invalid*')
                    name_offset = self.get_offset_from_rva(table[idx].AddressOfData + 2)
                except PEFormatError as e:
                    pass
        thunk_offset = table[idx].get_file_offset()
        thunk_rva = self.get_rva_from_offset(thunk_offset)
        imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * imp_offset
        struct_iat = None
        try:
            # A mismatch between ILT and IAT entries indicates a bound import.
            if iat:
                pass
                if ilt:
                    if ilt[idx].AddressOfData != iat[idx].AddressOfData:
                        imp_bound = iat[idx].AddressOfData
                        struct_iat = iat[idx]
            # NOTE(review): this unconditionally clears imp_bound right after it
            # may have been set above (decompilation artifact); kept as-is.
            imp_bound = None
        except IndexError:
            imp_bound = None
        if imp_ord == None:
            if imp_name == None:
                raise PEFormatError('Invalid entries, aborting parsing.')
        # Tolerate some invalid names, but give up on a fully broken table.
        if imp_name == b('*invalid*'):
            if num_invalid > 1000:
                if num_invalid == idx:
                    raise PEFormatError('Too many invalid names, aborting parsing.')
            num_invalid += 1
            continue
        if imp_ord or imp_name:
            imported_symbols.append(ImportData(pe=self,
                struct_table=table[idx],
                struct_iat=struct_iat,
                import_by_ordinal=import_by_ordinal,
                ordinal=imp_ord,
                ordinal_offset=table[idx].get_file_offset(),
                hint=imp_hint,
                name=imp_name,
                name_offset=name_offset,
                bound=imp_bound,
                address=imp_address,
                hint_name_table_rva=hint_name_table_rva,
                thunk_offset=thunk_offset,
                thunk_rva=thunk_rva))
    return imported_symbols
def get_import_table(self, rva, max_length=None):
    """Read consecutive IMAGE_THUNK_DATA structures starting at `rva`."""
    table = []
    # Thunk layout depends on PE32 vs PE32+ (64-bit uses 8-byte entries).
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        ordinal_flag = IMAGE_ORDINAL_FLAG
        format = self.__IMAGE_THUNK_DATA_format__
    else:
        if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
            ordinal_flag = IMAGE_ORDINAL_FLAG64
            format = self.__IMAGE_THUNK_DATA64_format__
        else:
            # Unknown optional-header magic: fall back to the 32-bit layout.
            ordinal_flag = IMAGE_ORDINAL_FLAG
            format = self.__IMAGE_THUNK_DATA_format__
    # Sanity limits against corrupt or malicious tables.
    MAX_ADDRESS_SPREAD = 134217728
    MAX_REPEATED_ADDRESSES = 15
    repeated_address = 0
    addresses_of_data_set_64 = set()
    addresses_of_data_set_32 = set()
    start_rva = rva
    # NOTE(review): when max_length is None the loop body never runs and an
    # empty table is returned (decompilation artifact); kept as-is.
    while True and rva and max_length is not None:
        if rva >= start_rva + max_length:
            self._PE__warnings.append('Error parsing the import table. Entries go beyond bounds.')
            break
        # Give up on tables that look corrupt.
        if repeated_address >= MAX_REPEATED_ADDRESSES:
            return []
        if addresses_of_data_set_32:
            if max(addresses_of_data_set_32) - min(addresses_of_data_set_32) > MAX_ADDRESS_SPREAD:
                return []
        if addresses_of_data_set_64:
            if max(addresses_of_data_set_64) - min(addresses_of_data_set_64) > MAX_ADDRESS_SPREAD:
                return []
        failed = False
        try:
            data = self.get_data(rva, Structure(format).sizeof())
        except PEFormatError as e:
            failed = True
        if failed or len(data) != Structure(format).sizeof():
            self._PE__warnings.append('Error parsing the import table. Invalid data at RVA: 0x%x' % rva)
            return
        thunk_data = self.__unpack_data__(format,
            data, file_offset=self.get_offset_from_rva(rva))
        # Refuse thunks whose data pointer overlaps the table itself.
        if thunk_data:
            if thunk_data.AddressOfData >= start_rva:
                if thunk_data.AddressOfData <= rva:
                    self._PE__warnings.append('Error parsing the import table. AddressOfData overlaps with THUNK_DATA for THUNK at RVA 0x%x' % rva)
                    break
        if thunk_data:
            pass
            if thunk_data.AddressOfData:
                if thunk_data.AddressOfData & ordinal_flag:
                    # Import by ordinal: only the low 16 bits may be set.
                    if thunk_data.AddressOfData & 2147483647 > 65535:
                        return []
                else:
                    # Track repeated and widely-spread name addresses.
                    if thunk_data.AddressOfData in addresses_of_data_set_32 or thunk_data.AddressOfData in addresses_of_data_set_64:
                        repeated_address += 1
                    if thunk_data.AddressOfData >= 4294967296:
                        addresses_of_data_set_64.add(thunk_data.AddressOfData)
                    else:
                        addresses_of_data_set_32.add(thunk_data.AddressOfData)
        # An all-zero thunk terminates the table.
        if not thunk_data or thunk_data.all_zeroes():
            break
        rva += thunk_data.sizeof()
        table.append(thunk_data)
    return table
def get_memory_mapped_image(self, max_virtual_address=268435456, ImageBase=None):
    """Returns the data corresponding to the memory layout of the PE file.
    The data includes the PE header and the sections loaded at offsets
    corresponding to their relative virtual addresses. (the VirtualAddress
    section header member).
    Any offset in this data corresponds to the absolute memory address
    ImageBase+offset.
    The optional argument 'max_virtual_address' provides with means of limiting
    which sections are processed.
    Any section with their VirtualAddress beyond this value will be skipped.
    Normally, sections with values beyond this range are just there to confuse
    tools. It's a common trick to see in packed executables.
    If the 'ImageBase' optional argument is supplied, the file's relocations
    will be applied to the image by calling the 'relocate_image()' method. Beware
    that the relocation information is applied permanently.
    """
    # When an ImageBase is given, temporarily relocate the raw data.
    if ImageBase is not None:
        original_data = self.__data__
        self.relocate_image(ImageBase)
    # Start from the raw headers; sections are appended at their RVAs.
    mapped_data = self.__data__[:]
    for section in self.sections:
        # Skip completely empty sections.
        if section.Misc_VirtualSize == 0:
            if section.SizeOfRawData == 0:
                continue
        if section.SizeOfRawData > len(self.__data__):
            # Raw size is impossibly large: ignore this section.
            pass
        elif self.adjust_FileAlignment(section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment) > len(self.__data__):
            # Raw data pointer lies beyond the end of the file: ignore it.
            pass
        else:
            VirtualAddress_adj = self.adjust_SectionAlignment(section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment)
            if VirtualAddress_adj >= max_virtual_address:
                # Section mapped suspiciously high: skip it.
                pass
            else:
                # Pad (or truncate) the image so the section lands at its RVA.
                padding_length = VirtualAddress_adj - len(mapped_data)
                if padding_length > 0:
                    mapped_data += b'\x00' * padding_length
                else:
                    if padding_length < 0:
                        mapped_data = mapped_data[:padding_length]
                mapped_data += section.get_data()
    # Restore the pre-relocation data.
    if ImageBase is not None:
        self.__data__ = original_data
    return mapped_data
def get_resources_strings(self):
    """Returns a list of all the strings found withing the resources (if any).

    Scans every entry of the PE's resource directory, when present, and
    collects the parsed string tables. An empty list is returned when the
    file has no resource directory or no strings.
    """
    found = list()
    if not hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
        return found
    for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
        if not hasattr(res_type, 'directory'):
            continue
        for res_id in res_type.directory.entries:
            if not hasattr(res_id, 'directory'):
                continue
            # Only RT_STRING sub-directories carry a parsed 'strings' dict.
            if getattr(res_id.directory, 'strings', None):
                found.extend(list(res_id.directory.strings.values()))
    return found
def get_data(self, rva=0, length=None):
    """Get data regardless of the section where it lies on.

    Given a RVA and the size of the chunk to retrieve, this method
    finds the section where the data lies and returns the data.
    """
    section = self.get_section_by_rva(rva)
    if section:
        return section.get_data(rva, length)
    # No section claims this RVA: read straight from header/file bytes.
    end = rva + length if length else None
    if rva < len(self.header):
        return self.header[rva:end]
    if rva < len(self.__data__):
        return self.__data__[rva:end]
    raise PEFormatError("data at RVA can't be fetched. Corrupt header?")
def get_rva_from_offset(self, offset):
    """Get the RVA corresponding to this file offset. """
    section = self.get_section_by_offset(offset)
    if section:
        return section.get_rva_from_offset(offset)
    if not self.sections:
        # No sections at all: the file maps 1:1 onto memory.
        return offset
    lowest_rva = min([self.adjust_SectionAlignment(sec.VirtualAddress,
                                                   self.OPTIONAL_HEADER.SectionAlignment,
                                                   self.OPTIONAL_HEADER.FileAlignment)
                      for sec in self.sections])
    if offset < lowest_rva:
        # The offset lies within the headers, which map 1:1.
        return offset
    return None
def get_offset_from_rva(self, rva):
    """Get the file offset corresponding to this RVA.
    Given a RVA , this method will find the section where the
    data lies and return the offset within the file.
    """
    section = self.get_section_by_rva(rva)
    if section:
        return section.get_offset_from_rva(rva)
    # Not inside any section: header RVAs map 1:1 onto file offsets.
    if rva < len(self.__data__):
        return rva
    raise PEFormatError("data at RVA can't be fetched. Corrupt header?")
def get_string_at_rva(self, rva, max_length=MAX_STRING_LENGTH):
    """Get an ASCII string located at the given address."""
    if rva is None:
        return None
    section = self.get_section_by_rva(rva)
    if section:
        return self.get_string_from_data(0, section.get_data(rva, length=max_length))
    # RVA not covered by any section: read from the raw file image.
    return self.get_string_from_data(0, self.__data__[rva:rva + max_length])
def get_bytes_from_data(self, offset, data):
    """Return ``data[offset:]``, empty when the offset is past the end."""
    if offset > len(data):
        return b''
    tail = data[offset:]
    # Normalize bytearray slices so callers always get immutable bytes.
    return bytes(tail) if isinstance(tail, bytearray) else tail
def get_string_from_data(self, offset, data):
    """Get an ASCII string from data, truncated at the first NUL byte."""
    raw = self.get_bytes_from_data(offset, data)
    nul = raw.find(b'\x00')
    return raw if nul < 0 else raw[:nul]
def get_string_u_at_rva(self, rva, max_length=65536, encoding=None):
    """Get an Unicode string located at the given address."""
    # Probe that at least one UTF-16 code unit is readable at the RVA.
    try:
        data = self.get_data(rva, 2)
    except PEFormatError as e:
        return
    # max_length is given in characters; convert to bytes.
    max_length <<= 1
    # Read a small chunk first and grow it only if no terminator is found.
    requested = min(max_length, 256)
    data = self.get_data(rva, requested)
    null_index = -1
    while 1:
        # Look for a 16-bit NUL terminator (two zero bytes).
        null_index = data.find(b'\x00\x00', null_index + 1)
        if null_index == -1:
            data_length = len(data)
            if data_length < requested or data_length == max_length:
                # Ran out of data (or hit the cap): take everything read.
                null_index = len(data) >> 1
                break
            else:
                # Fetch the remainder and rescan from the chunk boundary.
                data += self.get_data(rva + data_length, max_length - data_length)
                null_index = requested - 1
                requested = max_length
        elif null_index % 2 == 0:
            # Terminator must be aligned on a code-unit boundary; convert
            # the byte index to a character count.
            null_index >>= 1
            break
    uchrs = struct.unpack(('<{:d}H').format(null_index), data[:null_index * 2])
    s = ('').join(map(chr, uchrs))
    if encoding:
        return b(s.encode(encoding, 'backslashreplace_'))
    else:
        return b(s.encode('utf-8', 'backslashreplace_'))
def get_section_by_offset(self, offset):
    """Get the section containing the given file offset."""
    # Return the first matching section, None when nothing claims the offset.
    for section in self.sections:
        if section.contains_offset(offset):
            return section
    return None
def get_section_by_rva(self, rva):
    """Get the section containing the given address."""
    # Return the first matching section, None when nothing claims the RVA.
    for section in self.sections:
        if section.contains_rva(rva):
            return section
    return None
def __str__(self):
    # Render the full human-readable header dump when the object is printed.
    return self.dump_info()
def has_relocs(self):
    """Checks if the PE file has relocation directory"""
    # Equivalent to hasattr(): probe the attribute, translate the failure.
    try:
        self.DIRECTORY_ENTRY_BASERELOC
    except AttributeError:
        return False
    return True
def print_info(self, encoding='utf-8'):
    """Print all the PE header information in a human readable form."""
    # Thin wrapper around dump_info() that writes to stdout.
    print(self.dump_info(encoding=encoding))
def dump_info(self, dump=None, encoding='ascii'):
    """Dump all the PE header information into human readable string.

    `dump` is an optional Dump accumulator to append to; `encoding` is
    used to decode embedded byte strings for display.
    NOTE(review): indentation of this decompiled body was reconstructed;
    statement order is unchanged.
    """
    if dump is None:
        dump = Dump()
    # Warnings collected while parsing the file, if any.
    warnings = self.get_warnings()
    if warnings:
        dump.add_header('Parsing Warnings')
        for warning in warnings:
            dump.add_line(warning)
        dump.add_newline()
    # Fixed headers.
    dump.add_header('DOS_HEADER')
    dump.add_lines(self.DOS_HEADER.dump())
    dump.add_newline()
    dump.add_header('NT_HEADERS')
    dump.add_lines(self.NT_HEADERS.dump())
    dump.add_newline()
    dump.add_header('FILE_HEADER')
    dump.add_lines(self.FILE_HEADER.dump())
    # Decode the Characteristics bitfield into named flags.
    image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
    dump.add('Flags: ')
    flags = []
    for flag in sorted(image_flags):
        if getattr(self.FILE_HEADER, flag[0]):
            flags.append(flag[0])
    dump.add_line((', ').join(flags))
    dump.add_newline()
    if hasattr(self, 'OPTIONAL_HEADER'):
        if self.OPTIONAL_HEADER is not None:
            dump.add_header('OPTIONAL_HEADER')
            dump.add_lines(self.OPTIONAL_HEADER.dump())
            # Decode the DllCharacteristics bitfield.
            dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
            dump.add('DllCharacteristics: ')
            flags = []
            for flag in sorted(dll_characteristics_flags):
                if getattr(self.OPTIONAL_HEADER, flag[0]):
                    flags.append(flag[0])
            dump.add_line((', ').join(flags))
            dump.add_newline()
    # Per-section information: flags, entropy and content hashes.
    dump.add_header('PE Sections')
    section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
    for section in self.sections:
        dump.add_lines(section.dump())
        dump.add('Flags: ')
        flags = []
        for flag in sorted(section_flags):
            if getattr(section, flag[0]):
                flags.append(flag[0])
        dump.add_line((', ').join(flags))
        dump.add_line(('Entropy: {0:f} (Min=0.0, Max=8.0)').format(section.get_entropy()))
        # Hash helpers are None when the algorithm is unavailable.
        if md5 is not None:
            dump.add_line(('MD5 hash: {0}').format(section.get_hash_md5()))
        if sha1 is not None:
            dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1())
        if sha256 is not None:
            dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256())
        if sha512 is not None:
            dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512())
        dump.add_newline()
    # Data directories.
    if hasattr(self, 'OPTIONAL_HEADER'):
        if hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY'):
            dump.add_header('Directories')
            for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
                directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
                dump.add_lines(directory.dump())
    dump.add_newline()
    # Version resource (VS_VERSIONINFO / StringTable / Var entries).
    if hasattr(self, 'VS_VERSIONINFO'):
        dump.add_header('Version Information')
        dump.add_lines(self.VS_VERSIONINFO.dump())
        dump.add_newline()
        if hasattr(self, 'VS_FIXEDFILEINFO'):
            dump.add_lines(self.VS_FIXEDFILEINFO.dump())
            dump.add_newline()
        if hasattr(self, 'FileInfo'):
            for entry in self.FileInfo:
                dump.add_lines(entry.dump())
                dump.add_newline()
                if hasattr(entry, 'StringTable'):
                    for st_entry in entry.StringTable:
                        [dump.add_line(' ' + line) for line in st_entry.dump()]
                        dump.add_line((' LangID: {0}').format(st_entry.LangID.decode(encoding, 'backslashreplace_')))
                        dump.add_newline()
                        for str_entry in sorted(list(st_entry.entries.items())):
                            dump.add_line((' {0}: {1}').format(str_entry[0].decode(encoding, 'backslashreplace_'), str_entry[1].decode(encoding, 'backslashreplace_')))
                        dump.add_newline()
                else:
                    if hasattr(entry, 'Var'):
                        for var_entry in entry.Var:
                            if hasattr(var_entry, 'entry'):
                                [dump.add_line(' ' + line) for line in var_entry.dump()]
                                dump.add_line((' {0}: {1}').format(list(var_entry.entry.keys())[0].decode('utf-8', 'backslashreplace_'), list(var_entry.entry.values())[0]))
                        dump.add_newline()
    # Export directory.
    if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
        dump.add_header('Exported symbols')
        dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
        dump.add_newline()
        dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
        for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
            if export.address is not None:
                name = b('None')
                if export.name:
                    name = export.name
                dump.add('%-10d 0x%08Xh %s' % (
                    export.ordinal, export.address, name.decode(encoding)))
                if export.forwarder:
                    dump.add_line((' forwarder: {0}').format(export.forwarder.decode(encoding, 'backslashreplace_')))
                else:
                    dump.add_newline()
        dump.add_newline()
    # Import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        dump.add_header('Imported symbols')
        for module in self.DIRECTORY_ENTRY_IMPORT:
            dump.add_lines(module.struct.dump())
            # Print the DLL name even when it imports no symbols.
            if not module.imports:
                dump.add((' Name -> {0}').format(self.get_string_at_rva(module.struct.Name).decode(encoding, 'backslashreplace_')))
                dump.add_newline()
            dump.add_newline()
            for symbol in module.imports:
                if symbol.import_by_ordinal is True:
                    if symbol.name is not None:
                        dump.add(('{0}.{1} Ordinal[{2}] (Imported by Ordinal)').format(module.dll.decode('utf-8'), symbol.name.decode('utf-8'), symbol.ordinal))
                    else:
                        dump.add(('{0} Ordinal[{1}] (Imported by Ordinal)').format(module.dll.decode('utf-8'), symbol.ordinal))
                else:
                    dump.add(('{0}.{1} Hint[{2:d}]').format(module.dll.decode(encoding, 'backslashreplace_'), symbol.name.decode(encoding, 'backslashreplace_'), symbol.hint))
                if symbol.bound:
                    dump.add_line((' Bound: 0x{0:08X}').format(symbol.bound))
                else:
                    dump.add_newline()
            dump.add_newline()
    # Bound-import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
        dump.add_header('Bound imports')
        for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
            dump.add_lines(bound_imp_desc.struct.dump())
            dump.add_line(('DLL: {0}').format(bound_imp_desc.name.decode(encoding, 'backslashreplace_')))
            dump.add_newline()
            for bound_imp_ref in bound_imp_desc.entries:
                dump.add_lines(bound_imp_ref.struct.dump(), 4)
                dump.add_line(('DLL: {0}').format(bound_imp_ref.name.decode(encoding, 'backslashreplace_')), 4)
                dump.add_newline()
    # Delay-load import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
        dump.add_header('Delay Imported symbols')
        for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
            dump.add_lines(module.struct.dump())
            dump.add_newline()
            for symbol in module.imports:
                if symbol.import_by_ordinal is True:
                    dump.add(('{0} Ordinal[{1:d}] (Imported by Ordinal)').format(module.dll.decode(encoding, 'backslashreplace_'), symbol.ordinal))
                else:
                    dump.add(('{0}.{1} Hint[{2}]').format(module.dll.decode(encoding, 'backslashreplace_'), symbol.name.decode(encoding, 'backslashreplace_'), symbol.hint))
                if symbol.bound:
                    dump.add_line((' Bound: 0x{0:08X}').format(symbol.bound))
                else:
                    dump.add_newline()
            dump.add_newline()
    # Resource tree: type -> id -> language, with indentation levels.
    if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
        dump.add_header('Resource directory')
        dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
        for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
            if resource_type.name is not None:
                dump.add_line(('Name: [{0}]').format(resource_type.name.decode(encoding, 'backslashreplace_')), 2)
            else:
                dump.add_line(('Id: [0x{0:X}] ({1})').format(resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-')), 2)
            dump.add_lines(resource_type.struct.dump(), 2)
            if hasattr(resource_type, 'directory'):
                dump.add_lines(resource_type.directory.struct.dump(), 4)
                for resource_id in resource_type.directory.entries:
                    if resource_id.name is not None:
                        dump.add_line(('Name: [{0}]').format(resource_id.name.decode('utf-8', 'backslashreplace_')), 6)
                    else:
                        dump.add_line(('Id: [0x{0:X}]').format(resource_id.struct.Id), 6)
                    dump.add_lines(resource_id.struct.dump(), 6)
                    if hasattr(resource_id, 'directory'):
                        dump.add_lines(resource_id.directory.struct.dump(), 8)
                        for resource_lang in resource_id.directory.entries:
                            if hasattr(resource_lang, 'data'):
                                dump.add_line('\\--- LANG [%d,%d][%s,%s]' % (
                                    resource_lang.data.lang,
                                    resource_lang.data.sublang,
                                    LANG.get(resource_lang.data.lang, '*unknown*'),
                                    get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)), 8)
                                dump.add_lines(resource_lang.struct.dump(), 10)
                                dump.add_lines(resource_lang.data.struct.dump(), 12)
                        # Parsed string-table resources, if any.
                        if hasattr(resource_id.directory, 'strings'):
                            if resource_id.directory.strings:
                                dump.add_line('[STRINGS]', 10)
                                for idx, res_string in list(resource_id.directory.strings.items()):
                                    dump.add_line(('{0:6d}: {1}').format(idx, res_string.encode('unicode-escape', 'backslashreplace').decode('ascii')), 12)
            dump.add_newline()
        dump.add_newline()
    # TLS directory.
    if hasattr(self, 'DIRECTORY_ENTRY_TLS'):
        if self.DIRECTORY_ENTRY_TLS:
            if self.DIRECTORY_ENTRY_TLS.struct:
                dump.add_header('TLS')
                dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
                dump.add_newline()
    # Load-config directory.
    if hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
        if self.DIRECTORY_ENTRY_LOAD_CONFIG:
            if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct:
                dump.add_header('LOAD_CONFIG')
                dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump())
                dump.add_newline()
    # Debug directory.
    if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
        dump.add_header('Debug information')
        for dbg in self.DIRECTORY_ENTRY_DEBUG:
            dump.add_lines(dbg.struct.dump())
            try:
                dump.add_line('Type: ' + DEBUG_TYPE[dbg.struct.Type])
            except KeyError:
                dump.add_line(('Type: 0x{0:x}(Unknown)').format(dbg.struct.Type))
            dump.add_newline()
            if dbg.entry:
                dump.add_lines(dbg.entry.dump(), 4)
                dump.add_newline()
    # Base relocations.
    if self.has_relocs():
        dump.add_header('Base relocations')
        for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
            dump.add_lines(base_reloc.struct.dump())
            for reloc in base_reloc.entries:
                try:
                    # Strip the 'IMAGE_REL_BASED_' prefix (16 chars).
                    dump.add_line('%08Xh %s' % (
                        reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
                except KeyError:
                    dump.add_line('0x%08X 0x%x(Unknown)' % (
                        reloc.rva, reloc.type), 4)
            dump.add_newline()
    return dump.get_text()
def dump_dict(self, dump=None):
    """Dump all the PE header information into a dictionary.

    `dump` is accepted for signature symmetry with dump_info() but is
    not used by this implementation.
    NOTE(review): indentation of this decompiled body was reconstructed;
    statement order is unchanged.
    """
    dump_dict = dict()
    warnings = self.get_warnings()
    if warnings:
        dump_dict['Parsing Warnings'] = warnings
    dump_dict['DOS_HEADER'] = self.DOS_HEADER.dump_dict()
    dump_dict['NT_HEADERS'] = self.NT_HEADERS.dump_dict()
    dump_dict['FILE_HEADER'] = self.FILE_HEADER.dump_dict()
    # Decode the Characteristics bitfield into named flags.
    image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
    dump_dict['Flags'] = list()
    for flag in image_flags:
        if getattr(self.FILE_HEADER, flag[0]):
            dump_dict['Flags'].append(flag[0])
    if hasattr(self, 'OPTIONAL_HEADER'):
        if self.OPTIONAL_HEADER is not None:
            dump_dict['OPTIONAL_HEADER'] = self.OPTIONAL_HEADER.dump_dict()
            dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
            dump_dict['DllCharacteristics'] = list()
            for flag in dll_characteristics_flags:
                if getattr(self.OPTIONAL_HEADER, flag[0]):
                    dump_dict['DllCharacteristics'].append(flag[0])
    # Per-section information: flags, entropy and content hashes.
    dump_dict['PE Sections'] = list()
    section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
    for section in self.sections:
        section_dict = section.dump_dict()
        dump_dict['PE Sections'].append(section_dict)
        section_dict['Flags'] = list()
        for flag in section_flags:
            if getattr(section, flag[0]):
                section_dict['Flags'].append(flag[0])
        section_dict['Entropy'] = section.get_entropy()
        # Hash helpers are None when the algorithm is unavailable.
        if md5 is not None:
            section_dict['MD5'] = section.get_hash_md5()
        if sha1 is not None:
            section_dict['SHA1'] = section.get_hash_sha1()
        if sha256 is not None:
            section_dict['SHA256'] = section.get_hash_sha256()
        if sha512 is not None:
            section_dict['SHA512'] = section.get_hash_sha512()
    # Data directories.
    if hasattr(self, 'OPTIONAL_HEADER'):
        if hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY'):
            dump_dict['Directories'] = list()
            for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
                directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
                dump_dict['Directories'].append(directory.dump_dict())
    # Version resource.
    if hasattr(self, 'VS_VERSIONINFO'):
        dump_dict['Version Information'] = list()
        dump_dict['Version Information'].append(self.VS_VERSIONINFO.dump_dict())
        if hasattr(self, 'VS_FIXEDFILEINFO'):
            dump_dict['Version Information'].append(self.VS_FIXEDFILEINFO.dump_dict())
        if hasattr(self, 'FileInfo'):
            # NOTE(review): fileinfo_list is built but never attached to
            # dump_dict below — likely dropped during decompilation.
            fileinfo_list = list()
            for entry in self.FileInfo:
                fileinfo_list.append(entry.dump_dict())
                if hasattr(entry, 'StringTable'):
                    stringtable_dict = dict()
                    for st_entry in entry.StringTable:
                        [fileinfo_list.append(line) for line in st_entry.dump_dict()]
                        stringtable_dict['LangID'] = st_entry.LangID
                        for str_entry in list(st_entry.entries.items()):
                            stringtable_dict[str_entry[0]] = str_entry[1]
                    fileinfo_list.append(stringtable_dict)
                else:
                    if hasattr(entry, 'Var'):
                        for var_entry in entry.Var:
                            var_dict = dict()
                            if hasattr(var_entry, 'entry'):
                                [fileinfo_list.append(line) for line in var_entry.dump_dict()]
                                var_dict[list(var_entry.entry.keys())[0]] = list(var_entry.entry.values())[0]
                            fileinfo_list.append(var_dict)
    # Export directory.
    if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
        dump_dict['Exported symbols'] = list()
        dump_dict['Exported symbols'].append(self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict())
        for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
            export_dict = dict()
            if export.address is not None:
                export_dict.update({'Ordinal':export.ordinal, 'RVA':export.address, 'Name':export.name})
                if export.forwarder:
                    export_dict['forwarder'] = export.forwarder
            dump_dict['Exported symbols'].append(export_dict)
    # Import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        dump_dict['Imported symbols'] = list()
        for module in self.DIRECTORY_ENTRY_IMPORT:
            import_list = list()
            dump_dict['Imported symbols'].append(import_list)
            import_list.append(module.struct.dump_dict())
            for symbol in module.imports:
                symbol_dict = dict()
                if symbol.import_by_ordinal is True:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Ordinal'] = symbol.ordinal
                else:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Name'] = symbol.name
                    symbol_dict['Hint'] = symbol.hint
                if symbol.bound:
                    symbol_dict['Bound'] = symbol.bound
                import_list.append(symbol_dict)
    # Bound-import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
        dump_dict['Bound imports'] = list()
        for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
            bound_imp_desc_dict = dict()
            dump_dict['Bound imports'].append(bound_imp_desc_dict)
            bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict())
            bound_imp_desc_dict['DLL'] = bound_imp_desc.name
            for bound_imp_ref in bound_imp_desc.entries:
                # NOTE(review): bound_imp_ref_dict is built but never stored
                # anywhere — possibly lost in decompilation; verify against
                # upstream before relying on this output.
                bound_imp_ref_dict = dict()
                bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict())
                bound_imp_ref_dict['DLL'] = bound_imp_ref.name
    # Delay-load import directory.
    if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
        dump_dict['Delay Imported symbols'] = list()
        for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
            module_list = list()
            dump_dict['Delay Imported symbols'].append(module_list)
            module_list.append(module.struct.dump_dict())
            for symbol in module.imports:
                symbol_dict = dict()
                if symbol.import_by_ordinal is True:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Ordinal'] = symbol.ordinal
                else:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Name'] = symbol.name
                    symbol_dict['Hint'] = symbol.hint
                if symbol.bound:
                    symbol_dict['Bound'] = symbol.bound
                module_list.append(symbol_dict)
    # Resource tree: type -> id -> language.
    if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
        dump_dict['Resource directory'] = list()
        dump_dict['Resource directory'].append(self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict())
        for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
            resource_type_dict = dict()
            if resource_type.name is not None:
                resource_type_dict['Name'] = resource_type.name
            else:
                resource_type_dict['Id'] = (
                    resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-'))
            resource_type_dict.update(resource_type.struct.dump_dict())
            dump_dict['Resource directory'].append(resource_type_dict)
            if hasattr(resource_type, 'directory'):
                directory_list = list()
                directory_list.append(resource_type.directory.struct.dump_dict())
                dump_dict['Resource directory'].append(directory_list)
                for resource_id in resource_type.directory.entries:
                    resource_id_dict = dict()
                    if resource_id.name is not None:
                        resource_id_dict['Name'] = resource_id.name
                    else:
                        resource_id_dict['Id'] = resource_id.struct.Id
                    resource_id_dict.update(resource_id.struct.dump_dict())
                    directory_list.append(resource_id_dict)
                    if hasattr(resource_id, 'directory'):
                        resource_id_list = list()
                        resource_id_list.append(resource_id.directory.struct.dump_dict())
                        directory_list.append(resource_id_list)
                        for resource_lang in resource_id.directory.entries:
                            if hasattr(resource_lang, 'data'):
                                resource_lang_dict = dict()
                                resource_lang_dict['LANG'] = resource_lang.data.lang
                                resource_lang_dict['SUBLANG'] = resource_lang.data.sublang
                                resource_lang_dict['LANG_NAME'] = LANG.get(resource_lang.data.lang, '*unknown*')
                                resource_lang_dict['SUBLANG_NAME'] = get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
                                resource_lang_dict.update(resource_lang.struct.dump_dict())
                                resource_lang_dict.update(resource_lang.data.struct.dump_dict())
                                resource_id_list.append(resource_lang_dict)
                        # Parsed string-table resources, if any.
                        if hasattr(resource_id.directory, 'strings'):
                            if resource_id.directory.strings:
                                for idx, res_string in list(resource_id.directory.strings.items()):
                                    resource_id_list.append(res_string.encode('unicode-escape', 'backslashreplace').decode('ascii'))
    # TLS directory.
    if hasattr(self, 'DIRECTORY_ENTRY_TLS'):
        if self.DIRECTORY_ENTRY_TLS:
            if self.DIRECTORY_ENTRY_TLS.struct:
                dump_dict['TLS'] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict()
    # Load-config directory.
    if hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
        if self.DIRECTORY_ENTRY_LOAD_CONFIG:
            if self.DIRECTORY_ENTRY_LOAD_CONFIG.struct:
                dump_dict['LOAD_CONFIG'] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict()
    # Debug directory.
    if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
        dump_dict['Debug information'] = list()
        for dbg in self.DIRECTORY_ENTRY_DEBUG:
            dbg_dict = dict()
            dump_dict['Debug information'].append(dbg_dict)
            dbg_dict.update(dbg.struct.dump_dict())
            dbg_dict['Type'] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type)
    # Base relocations.
    if self.has_relocs():
        dump_dict['Base relocations'] = list()
        for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
            base_reloc_list = list()
            dump_dict['Base relocations'].append(base_reloc_list)
            base_reloc_list.append(base_reloc.struct.dump_dict())
            for reloc in base_reloc.entries:
                reloc_dict = dict()
                base_reloc_list.append(reloc_dict)
                reloc_dict['RVA'] = reloc.rva
                try:
                    # Strip the 'IMAGE_REL_BASED_' prefix (16 chars).
                    reloc_dict['Type'] = RELOCATION_TYPE[reloc.type][16:]
                except KeyError:
                    reloc_dict['Type'] = reloc.type
    return dump_dict
def get_physical_by_rva(self, rva):
    """Gets the physical address in the PE file from an RVA value.

    Returns None when the RVA cannot be mapped to a file offset.
    """
    try:
        return self.get_offset_from_rva(rva)
    except PEFormatError:
        # Only mapping failures are expected here; the previous broad
        # `except Exception` silently swallowed unrelated bugs too.
        return
def get_data_from_dword(self, dword):
    """Return a four byte string representing the double word value. (little endian).

    The value is masked to 32 bits before packing.
    """
    return struct.pack('<L', dword & 0xFFFFFFFF)
def get_dword_from_data(self, data, offset):
    """Convert four bytes of data to a double word (little endian).

    `offset` indexes a dword array: element N starts at byte N*4.
    Returns None if the data can't be turned into a double word.
    """
    end = (offset + 1) * 4
    if end > len(data):
        return None
    return struct.unpack('<I', data[end - 4:end])[0]
def get_dword_at_rva(self, rva):
    """Return the double word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be
    mapped to a file offset.
    """
    try:
        raw = self.get_data(rva, 4)
    except PEFormatError:
        return None
    return self.get_dword_from_data(raw, 0)
def get_dword_from_offset(self, offset):
    """Return the double word value at the given file offset. (little endian)"""
    if offset + 4 <= len(self.__data__):
        return self.get_dword_from_data(self.__data__[offset:offset + 4], 0)
    return None
def set_dword_at_rva(self, rva, dword):
    """Set the double word value at the file offset corresponding to the given RVA."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_rva(rva, packed)
def set_dword_at_offset(self, offset, dword):
    """Set the double word value at the given file offset."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_offset(offset, packed)
def get_data_from_word(self, word):
    """Return a two byte string representing the word value. (little endian)."""
    packer = struct.Struct('<H')
    return packer.pack(word)
def get_word_from_data(self, data, offset):
    """Convert two bytes of data to a word (little endian).

    `offset` indexes a word array: element N starts at byte N*2.
    Returns None if the data can't be turned into a word.
    """
    end = (offset + 1) * 2
    if end > len(data):
        return None
    return struct.unpack('<H', data[end - 2:end])[0]
def get_word_at_rva(self, rva):
    """Return the word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be
    mapped to a file offset.
    """
    try:
        raw = self.get_data(rva)[:2]
    except PEFormatError:
        return None
    return self.get_word_from_data(raw, 0)
def get_word_from_offset(self, offset):
    """Return the word value at the given file offset. (little endian)"""
    if offset + 2 <= len(self.__data__):
        return self.get_word_from_data(self.__data__[offset:offset + 2], 0)
    return None
def set_word_at_rva(self, rva, word):
    """Set the word value at the file offset corresponding to the given RVA."""
    packed = self.get_data_from_word(word)
    return self.set_bytes_at_rva(rva, packed)
def set_word_at_offset(self, offset, word):
    """Set the word value at the given file offset."""
    packed = self.get_data_from_word(word)
    return self.set_bytes_at_offset(offset, packed)
def get_data_from_qword(self, word):
    """Return a eight byte string representing the quad-word value. (little endian)."""
    packer = struct.Struct('<Q')
    return packer.pack(word)
def get_qword_from_data(self, data, offset):
    """Convert eight bytes of data to a quad word (little endian).

    `offset` indexes a qword array: element N starts at byte N*8.
    Returns None if the data can't be turned into a quad word.
    """
    end = (offset + 1) * 8
    if end > len(data):
        return None
    return struct.unpack('<Q', data[end - 8:end])[0]
def get_qword_at_rva(self, rva):
    """Return the quad-word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be
    mapped to a file offset.
    """
    try:
        raw = self.get_data(rva)[:8]
    except PEFormatError:
        return None
    return self.get_qword_from_data(raw, 0)
def get_qword_from_offset(self, offset):
    """Return the quad-word value at the given file offset. (little endian)"""
    if offset + 8 <= len(self.__data__):
        return self.get_qword_from_data(self.__data__[offset:offset + 8], 0)
    return None
def set_qword_at_rva(self, rva, qword):
    """Set the quad-word value at the file offset corresponding to the given RVA."""
    packed = self.get_data_from_qword(qword)
    return self.set_bytes_at_rva(rva, packed)
def set_qword_at_offset(self, offset, qword):
    """Set the quad-word value at the given file offset."""
    packed = self.get_data_from_qword(qword)
    return self.set_bytes_at_offset(offset, packed)
def set_bytes_at_rva(self, rva, data):
    """Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')
    offset = self.get_physical_by_rva(rva)
    # A falsy offset (None, or zero) is treated as an unmappable RVA.
    return self.set_bytes_at_offset(offset, data) if offset else False
def set_bytes_at_offset(self, offset, data):
    """Overwrite the bytes at the given file offset with the given string.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.

    Raises TypeError when `data` is not a bytes object.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')
    # BUG FIX: the decompiled body returned False after a successful
    # write and True when the offset was out of bounds — the opposite of
    # what the docstring (and upstream pefile) promise.
    if 0 <= offset < len(self.__data__):
        self.__data__ = self.__data__[:offset] + data + self.__data__[offset + len(data):]
        return True
    return False
def merge_modified_section_data(self):
    """Update the PE image content with any individual section data that has been modified.

    Sections whose raw data lies (even partially) outside the current
    image are left untouched.
    """
    for section in self.sections:
        section_data_start = self.adjust_FileAlignment(section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment)
        section_data_end = section_data_start + section.SizeOfRawData
        # BUG FIX: the decompiled body assigned the result of an `and`
        # expression to self.__data__, which clobbered the whole image
        # with the boolean False whenever a section extended past EOF.
        if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
            self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:]
def relocate_image(self, new_ImageBase):
    """Apply the relocation information to the image using the provided new image base.

    This method will apply the relocation information to the image. Given the new base,
    all the relocations will be processed and both the raw data and the section's data
    will be fixed accordingly.
    The resulting image can be retrieved as well through the method:
        get_memory_mapped_image()
    In order to get something that would more closely match what could be found in memory
    once the Windows loader finished its work.
    """
    relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
    if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
        for reloc in self.DIRECTORY_ENTRY_BASERELOC:
            # Unused in the loop body, but kept from the original code.
            virtual_address = reloc.struct.VirtualAddress
            size_of_block = reloc.struct.SizeOfBlock
            # An explicit index is used (instead of a for loop) because
            # HIGHADJ entries consume the following entry as well.
            entry_idx = 0
            while entry_idx < len(reloc.entries):
                entry = reloc.entries[entry_idx]
                entry_idx += 1
                if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
                    # Padding entry: nothing to fix.
                    continue
                if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
                    # Fix the high 16 bits of a 32-bit field.
                    # Precedence note: evaluates as
                    # ((word + diff) >> 16) & 0xFFFF.
                    self.set_word_at_rva(entry.rva, self.get_word_at_rva(entry.rva) + relocation_difference >> 16 & 65535)
                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
                    # Fix the low 16 bits: (word + diff) & 0xFFFF.
                    self.set_word_at_rva(entry.rva, self.get_word_at_rva(entry.rva) + relocation_difference & 65535)
                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
                    # Fix a full 32-bit address.
                    self.set_dword_at_rva(entry.rva, self.get_dword_at_rva(entry.rva) + relocation_difference)
                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
                    if entry_idx == len(reloc.entries):
                        # Malformed: HIGHADJ needs a follow-up entry.
                        break
                    # The next entry's rva field carries the low word used
                    # for the rounding adjustment.
                    next_entry = reloc.entries[entry_idx]
                    entry_idx += 1
                    self.set_word_at_rva(entry.rva, ((self.get_word_at_rva(entry.rva) << 16) + next_entry.rva + relocation_difference & 4294901760) >> 16)
                elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
                    # Fix a 64-bit address.
                    self.set_qword_at_rva(entry.rva, self.get_qword_at_rva(entry.rva) + relocation_difference)
def verify_checksum(self):
    """Return True when the optional header CheckSum matches a freshly computed one."""
    computed = self.generate_checksum()
    return computed == self.OPTIONAL_HEADER.CheckSum
def generate_checksum(self):
    """Compute the PE optional-header checksum over the current file data.

    Sums the image as little-endian dwords (skipping the dword that
    holds the CheckSum field itself), folds the carries into 16 bits and
    adds the file length — mirroring the Windows loader's algorithm.
    """
    # Recompute from the serialized image so header edits are included.
    self.__data__ = self.write()
    # The CheckSum field lives 64 bytes into the optional header.
    checksum_offset = self.OPTIONAL_HEADER.get_file_offset() + 64
    checksum = 0
    remainder = len(self.__data__) % 4
    data_len = len(self.__data__) + (4 - remainder) * (remainder != 0)
    for i in range(int(data_len / 4)):
        # Skip the dword holding the CheckSum field itself.
        if i == int(checksum_offset / 4):
            continue
        if i + 1 == int(data_len / 4) and remainder:
            # Final, partial dword: zero-pad it to 4 bytes.
            # BUG FIX: the decompiled body re-read a plain 4-byte slice
            # unconditionally after this, raising struct.error for any
            # file whose size is not a multiple of 4; the plain read
            # belongs in the `else` branch.
            dword = struct.unpack('I', self.__data__[i * 4:] + b'\x00' * (4 - remainder))[0]
        else:
            dword = struct.unpack('I', self.__data__[i * 4:i * 4 + 4])[0]
        checksum += dword
        if checksum >= 4294967296:
            checksum = (checksum & 4294967295) + (checksum >> 32)
    checksum = (checksum & 65535) + (checksum >> 16)
    checksum = checksum + (checksum >> 16)
    checksum = checksum & 65535
    return checksum + len(self.__data__)
def is_exe(self):
    """Check whether the file is a standard executable.

    This will return true only if the file has the
    IMAGE_FILE_EXECUTABLE_IMAGE flag set and the IMAGE_FILE_DLL not set
    and the file does not appear to be a driver either.
    """
    EXE_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_EXECUTABLE_IMAGE']
    # Same short-circuit order as before: DLL check, driver check, flag.
    return (not self.is_dll()
            and not self.is_driver()
            and EXE_flag & self.FILE_HEADER.Characteristics == EXE_flag)
def is_dll(self):
    """Check whether the file is a standard DLL.

    This will return true only if the image has the IMAGE_FILE_DLL flag set.
    """
    DLL_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_DLL']
    return DLL_flag & self.FILE_HEADER.Characteristics == DLL_flag
def is_driver(self):
    """Check whether the file is a Windows driver.

    This will return true only if there are reliable indicators of the
    image being a driver: imports from core kernel modules, or
    driver-like section names combined with a native subsystem.
    """
    # Make sure the import directory has been parsed before inspecting it.
    if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        self.parse_data_directories(directories=[
            DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
    if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        return False
    system_DLLs = {'ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll',
                   'kdcom.dll'}
    imported_names = [imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT]
    if system_DLLs.intersection(imported_names):
        return True
    driver_like_section_names = {'page', 'paged'}
    section_names = [section.Name.lower() for section in self.sections]
    if driver_like_section_names.intersection(section_names):
        if self.OPTIONAL_HEADER.Subsystem in (
                SUBSYSTEM_TYPE['IMAGE_SUBSYSTEM_NATIVE'],
                SUBSYSTEM_TYPE['IMAGE_SUBSYSTEM_NATIVE_WINDOWS']):
            return True
    return False
def get_overlay_data_start_offset(self):
    """Get the offset of data appended to the file and not contained within
    the area described in the headers."""
    largest_offset_and_size = (0, 0)

    # NOTE: `file_size` is bound once, when this inner def executes, and
    # the closure reads the *current* largest_offset_and_size each call.
    def update_if_sum_is_larger_and_within_file(offset_and_size, file_size=len(self.__data__)):
        if sum(offset_and_size) <= file_size:
            if sum(offset_and_size) > sum(largest_offset_and_size):
                return offset_and_size
        return largest_offset_and_size

    if hasattr(self, 'OPTIONAL_HEADER'):
        largest_offset_and_size = update_if_sum_is_larger_and_within_file((
            self.OPTIONAL_HEADER.get_file_offset(), self.FILE_HEADER.SizeOfOptionalHeader))
    for section in self.sections:
        largest_offset_and_size = update_if_sum_is_larger_and_within_file((
            section.PointerToRawData, section.SizeOfRawData))
    # The security directory is skipped — presumably because its
    # VirtualAddress is a file offset rather than an RVA (TODO confirm
    # against the PE/COFF specification).
    skip_directories = [DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']]
    for idx, directory in enumerate(self.OPTIONAL_HEADER.DATA_DIRECTORY):
        if idx in skip_directories:
            continue
        try:
            largest_offset_and_size = update_if_sum_is_larger_and_within_file((
                self.get_offset_from_rva(directory.VirtualAddress), directory.Size))
        except PEFormatError:
            continue
    # Anything past the farthest header-described region is overlay;
    # implicitly returns None when there is no overlay.
    if len(self.__data__) > sum(largest_offset_and_size):
        return sum(largest_offset_and_size)
def get_overlay(self):
    """Get the data appended to the file and not contained within the area described in the headers."""
    start = self.get_overlay_data_start_offset()
    if start is None:
        return None
    return self.__data__[start:]
def trim(self):
    """Return the just data defined by the PE headers, removing any overlayed data."""
    start = self.get_overlay_data_start_offset()
    # No overlay: return a copy of the whole image.
    return self.__data__[:] if start is None else self.__data__[:start]
def adjust_FileAlignment(self, val, file_alignment):
    """Map a raw-data pointer according to the FileAlignment loader quirks.

    Values below the hardcoded 0x200 threshold are returned unchanged;
    otherwise the pointer is rounded down to a 512-byte boundary. A
    one-time warning is recorded for suspicious alignment values.
    """
    global FileAlignment_Warning
    if file_alignment > FILE_ALIGNMENT_HARDCODED_VALUE and not power_of_two(file_alignment):
        if FileAlignment_Warning is False:
            self._PE__warnings.append('If FileAlignment > 0x200 it should be a power of 2. Value: %x' % file_alignment)
            FileAlignment_Warning = True
    if file_alignment < FILE_ALIGNMENT_HARDCODED_VALUE:
        return val
    return int(val / 512) * 512
def adjust_SectionAlignment(self, val, section_alignment, file_alignment):
    """Round a virtual address down according to SectionAlignment.

    When SectionAlignment is below the page size it falls back to
    FileAlignment, matching loader behavior. A one-time warning is
    recorded for inconsistent alignment combinations.
    """
    global SectionAlignment_Warning
    if file_alignment < FILE_ALIGNMENT_HARDCODED_VALUE and file_alignment != section_alignment:
        if SectionAlignment_Warning is False:
            self._PE__warnings.append('If FileAlignment(%x) < 0x200 it should equal SectionAlignment(%x)' % (
                file_alignment, section_alignment))
            SectionAlignment_Warning = True
    if section_alignment < 4096:
        section_alignment = file_alignment
    if section_alignment and val % section_alignment:
        return section_alignment * int(val / section_alignment)
    return val
def main():
    """Command-line entry point: dump a PE file, or just its exports."""
    import sys
    usage = 'pefile.py <filename>\npefile.py exports <filename>'
    args = sys.argv[1:]
    if not args:
        print(usage)
    elif args[0] == 'exports':
        if len(args) < 2:
            sys.exit('error: <filename> required')
        pe = PE(args[1])
        for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
            print(hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal)
    else:
        print(PE(args[0]).dump_info())
# Allow the module to be executed directly as a command-line tool.
if __name__ == '__main__':
    main()
Python
1
https://gitee.com/zhanghk668/opsrv_extracted.git
git@gitee.com:zhanghk668/opsrv_extracted.git
zhanghk668
opsrv_extracted
某扫描器核心反编译
master

搜索帮助