Architecture
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+from __future__ import absolute_import
+import atexit
+import sys
+import ctypes
+from time import gmtime
+
+
+# 2-3 compatibility
+try:
+ import builtins # Python 3 module; on Python 2 the ImportError below is ignored and xrange is used instead
+except ImportError:
+ pass
+[docs]def range(*args):
+ """ A Python2 and Python3 Compatible Range Generator """
+ try:
+ return xrange(*args)
+ except NameError:
+ return builtins.range(*args)
+
+
+[docs]def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
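+# Illustrative use of ``with_metaclass`` (``SomeMeta`` is a hypothetical
+# metaclass, not part of this module):
+#
+#     class MyBase(with_metaclass(SomeMeta, object)):
+#         pass
+#
+# The temporary class returned above is replaced by ``SomeMeta`` when the
+# subclass body is executed, which works on both Python 2 and Python 3.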
+
+try:
+ long = long
+except NameError:
+ long = int
+
+
+[docs]def cstr(arg):
+ if isinstance(arg, bytes) or arg is None:
+ return arg
+ else:
+ return arg.encode('charmap')
+
+
+[docs]def pyNativeStr(arg):
+ if isinstance(arg, str):
+ return arg
+ else:
+ return arg.decode('charmap')
+
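+# Sketch of how these helpers are used (values shown for Python 3):
+#
+#     raw = cstr("eax")        # b'eax' -- bytes suitable for the core API
+#     name = pyNativeStr(raw)  # 'eax'  -- back to the native str type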
+
+# Binary Ninja components
+import binaryninja._binaryninjacore as core
+# __all__ = [
+# "enums",
+# "databuffer",
+# "filemetadata",
+# "fileaccessor",
+# "binaryview",
+# "transform",
+# "architecture",
+# "basicblock",
+# "function",
+# "log",
+# "lowlevelil",
+# "mediumlevelil",
+# "types",
+# "functionrecognizer",
+# "update",
+# "plugin",
+# "callingconvention",
+# "platform",
+# "demangle",
+# "mainthread",
+# "interaction",
+# "lineardisassembly",
+# "undoaction",
+# "highlight",
+# "scriptingprovider",
+# "pluginmanager",
+# "setting",
+# "metadata",
+# "flowgraph",
+# ]
+from binaryninja.enums import *
+from binaryninja.databuffer import *
+from binaryninja.filemetadata import *
+from binaryninja.fileaccessor import *
+from binaryninja.binaryview import *
+from binaryninja.transform import *
+from binaryninja.architecture import *
+from binaryninja.basicblock import *
+from binaryninja.function import *
+from binaryninja.log import *
+from binaryninja.lowlevelil import *
+from binaryninja.mediumlevelil import *
+from binaryninja.types import *
+from binaryninja.functionrecognizer import *
+from binaryninja.update import *
+from binaryninja.plugin import *
+from binaryninja.callingconvention import *
+from binaryninja.platform import *
+from binaryninja.demangle import *
+from binaryninja.mainthread import *
+from binaryninja.interaction import *
+from binaryninja.lineardisassembly import *
+from binaryninja.undoaction import *
+from binaryninja.highlight import *
+from binaryninja.scriptingprovider import *
+from binaryninja.downloadprovider import *
+from binaryninja.pluginmanager import *
+from binaryninja.settings import *
+from binaryninja.metadata import *
+from binaryninja.flowgraph import *
+from binaryninja.datarender import *
+
+
+[docs]def shutdown():
+ """
+ ``shutdown`` cleanly shuts down the core, stopping all workers and closing all log files.
+ """
+ core.BNShutdown()
+
+
+atexit.register(shutdown)
+
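+# A minimal headless sketch (assumes a headless-capable license and that
+# ``BinaryViewType.get_view_of_file`` is available in this API version):
+#
+#     import binaryninja
+#     bv = binaryninja.BinaryViewType.get_view_of_file("/bin/ls")
+#     print(len(bv.functions))
+#     binaryninja.shutdown()  # optional; also registered via atexit above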
+
+
+
+
+[docs]def get_install_directory():
+ """
+ ``get_install_directory`` returns a string containing the install path of the Binary Ninja binary that is currently running
+
+ .. warning:: ONLY for use within the Binary Ninja UI; behavior is undefined and unreliable if run headlessly
+ """
+ return core.BNGetInstallDirectory()
+
+
+_plugin_api_name = "python2"
+
+
+[docs]class PluginManagerLoadPluginCallback(object):
+ """Callback for BNLoadPluginForApi("python2", ...), dynamically loads python plugins."""
+ def __init__(self):
+ self.cb = ctypes.CFUNCTYPE(
+ ctypes.c_bool,
+ ctypes.c_char_p,
+ ctypes.c_char_p,
+ ctypes.c_void_p)(self._load_plugin)
+
+ def _load_plugin(self, repo_path, plugin_path, ctx):
+ try:
+ repo = RepositoryManager()[repo_path]
+ plugin = repo[plugin_path]
+
+ if plugin.api != _plugin_api_name:
+ raise ValueError("Plugin API name is not " + _plugin_api_name)
+
+ if not plugin.installed:
+ plugin.installed = True
+
+ if repo.full_path not in sys.path:
+ sys.path.append(repo.full_path)
+
+ __import__(plugin.path)
+ log_info("Successfully loaded plugin: {}/{}: ".format(repo_path, plugin_path))
+ return True
+ except KeyError:
+ log_error("Failed to find python plugin: {}/{}".format(repo_path, plugin_path))
+ except ImportError as ie:
+ log_error("Failed to import python plugin: {}/{}: {}".format(repo_path, plugin_path, ie))
+ return False
+
+
+load_plugin = PluginManagerLoadPluginCallback()
+core.BNRegisterForPluginLoading(_plugin_api_name, load_plugin.cb, 0)
+
+
+class _DestructionCallbackHandler(object):
+ def __init__(self):
+ self._cb = core.BNObjectDestructionCallbacks()
+ self._cb.context = 0
+ self._cb.destructBinaryView = self._cb.destructBinaryView.__class__(self.destruct_binary_view)
+ self._cb.destructFileMetadata = self._cb.destructFileMetadata.__class__(self.destruct_file_metadata)
+ self._cb.destructFunction = self._cb.destructFunction.__class__(self.destruct_function)
+ core.BNRegisterObjectDestructionCallbacks(self._cb)
+
+ def destruct_binary_view(self, ctxt, view):
+ BinaryView._unregister(view)
+
+ def destruct_file_metadata(self, ctxt, f):
+ FileMetadata._unregister(f)
+
+ def destruct_function(self, ctxt, func):
+ Function._unregister(func)
+
+
+_plugin_init = False
+
+
+def _init_plugins():
+ global _plugin_init
+ if not _plugin_init:
+ _plugin_init = True
+ core.BNInitCorePlugins()
+ core.BNInitUserPlugins()
+ core.BNInitRepoPlugins()
+ if not core.BNIsLicenseValidated():
+ raise RuntimeError("License is not valid. Please supply a valid license.")
+
+
+_destruct_callbacks = _DestructionCallbackHandler()
+
+[docs]def bundled_plugin_path():
+ """
+ ``bundled_plugin_path`` returns a string containing the current plugin path inside the `install path <https://docs.binary.ninja/getting-started.html#binary-path>`_
+
+ :return: current bundled plugin path
+ :rtype: str, or None on failure
+ """
+ return core.BNGetBundledPluginDirectory()
+
+[docs]def user_plugin_path():
+ """
+ ``user_plugin_path`` returns a string containing the current plugin path inside the `user directory <https://docs.binary.ninja/getting-started.html#user-folder>`_
+
+ :return: current user plugin path
+ :rtype: str, or None on failure
+ """
+ return core.BNGetUserPluginDirectory()
+
+[docs]def core_version():
+ """
+ ``core_version`` returns a string containing the current version
+
+ :return: current version
+ :rtype: str, or None on failure
+ """
+ return core.BNGetVersionString()
+
+[docs]def core_build_id():
+ """
+ ``core_build_id`` returns a string containing the current build id
+
+ :return: current build id
+ :rtype: str, or None on failure
+ """
+ return core.BNGetBuildId()
+
+[docs]def core_serial():
+ """
+ ``core_serial`` returns a string containing the current serial number
+
+ :return: current serial
+ :rtype: str, or None on failure
+ """
+ return core.BNGetSerialNumber()
+
+[docs]def core_expires():
+ '''License expiration time, as a UTC ``time.struct_time``'''
+ return gmtime(core.BNGetLicenseExpirationTime())
+
+
+
+[docs]def core_product_type():
+ '''Product type from the license file'''
+ return core.BNGetProductType()
+
+[docs]def core_license_count():
+ '''License count from the license file'''
+ return core.BNGetLicenseCount()
+
+[docs]def core_ui_enabled():
+ '''Indicates that a UI exists and the UI has invoked BNInitUI'''
+ return core.BNIsUIEnabled()
+
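+# Illustrative example combining the helpers above to report details about the
+# running core:
+#
+#     log_info("Binary Ninja {} (build {}), {} edition".format(
+#         core_version(), core_build_id(), core_product_type()))
+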
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from __future__ import absolute_import
+import traceback
+import ctypes
+import abc
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import (Endianness, ImplicitRegisterExtend, BranchType,
+ InstructionTextTokenType, LowLevelILFlagCondition, FlagRole)
+import binaryninja
+from binaryninja import log
+from binaryninja import lowlevelil
+from binaryninja import types
+from binaryninja import databuffer
+from binaryninja import platform
+from binaryninja import callingconvention
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+from binaryninja import long
+
+class _ArchitectureMetaClass(type):
+
+ @property
+ def list(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ archs = core.BNGetArchitectureList(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(CoreArchitecture._from_cache(archs[i]))
+ core.BNFreeArchitectureList(archs)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ archs = core.BNGetArchitectureList(count)
+ try:
+ for i in range(0, count.value):
+ yield CoreArchitecture._from_cache(archs[i])
+ finally:
+ core.BNFreeArchitectureList(archs)
+
+ def __getitem__(cls, name):
+ binaryninja._init_plugins()
+ arch = core.BNGetArchitectureByName(name)
+ if arch is None:
+ raise KeyError("'%s' is not a valid architecture" % str(name))
+ return CoreArchitecture._from_cache(arch)
+
+ def register(cls):
+ binaryninja._init_plugins()
+ if cls.name is None:
+ raise ValueError("architecture 'name' is not defined")
+ arch = cls()
+ cls._registered_cb = arch._cb
+ arch.handle = core.BNRegisterArchitecture(cls.name, arch._cb)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
+[docs]class Architecture(with_metaclass(_ArchitectureMetaClass, object)):
+ """
+ ``class Architecture`` is the parent class for all CPU architectures. Subclasses of Architecture implement assembly,
+ disassembly, IL lifting, and patching.
+
+ ``class Architecture`` has a metaclass that provides the additional method ``register`` and supports
+ iteration::
+
+ >>> #List the architectures
+ >>> list(Architecture)
+ [<arch: aarch64>, <arch: armv7>, <arch: armv7eb>, <arch: mipsel32>, <arch: mips32>, <arch: powerpc>,
+ <arch: x86>, <arch: x86_64>]
+ >>> #Register a new Architecture
+ >>> class MyArch(Architecture):
+ ... name = "MyArch"
+ ...
+ >>> MyArch.register()
+ >>> list(Architecture)
+ [<arch: aarch64>, <arch: armv7>, <arch: armv7eb>, <arch: mipsel32>, <arch: mips32>, <arch: powerpc>,
+ <arch: x86>, <arch: x86_64>, <arch: MyArch>]
+ >>>
+
+ For the purposes of this documentation, the variable ``arch`` will be used in the following context ::
+
+ >>> from binaryninja import *
+ >>> arch = Architecture['x86']
+ """
+ name = None
+ endianness = Endianness.LittleEndian
+ address_size = 8
+ default_int_size = 4
+ instr_alignment = 1
+ max_instr_length = 16
+ opcode_display_length = 8
+ regs = {}
+ stack_pointer = None
+ link_reg = None
+ global_regs = []
+ flags = []
+ flag_write_types = []
+ semantic_flag_classes = []
+ semantic_flag_groups = []
+ flag_roles = {}
+ flags_required_for_flag_condition = {}
+ flags_required_for_semantic_flag_group = {}
+ flag_conditions_for_semantic_flag_group = {}
+ flags_written_by_flag_write_type = {}
+ semantic_class_for_flag_write_type = {}
+ reg_stacks = {}
+ intrinsics = {}
+ next_address = 0
+
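+ # Illustrative sketch of a minimal subclass (names and sizes are examples
+ # only); a useful plugin would also override ``get_instruction_info``,
+ # ``get_instruction_text`` and ``get_instruction_low_level_il``:
+ #
+ #     class MyArch(Architecture):
+ #         name = "MyArch"
+ #         address_size = 2
+ #         default_int_size = 2
+ #         max_instr_length = 2
+ #         regs = {"sp": RegisterInfo("sp", 2)}
+ #         stack_pointer = "sp"
+ #
+ #     MyArch.register()
+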
+[docs] def __init__(self):
+ binaryninja._init_plugins()
+
+ if self.__class__.opcode_display_length > self.__class__.max_instr_length:
+ self.__class__.opcode_display_length = self.__class__.max_instr_length
+
+ self._cb = core.BNCustomArchitecture()
+ self._cb.context = 0
+ self._cb.init = self._cb.init.__class__(self._init)
+ self._cb.getEndianness = self._cb.getEndianness.__class__(self._get_endianness)
+ self._cb.getAddressSize = self._cb.getAddressSize.__class__(self._get_address_size)
+ self._cb.getDefaultIntegerSize = self._cb.getDefaultIntegerSize.__class__(self._get_default_integer_size)
+ self._cb.getInstructionAlignment = self._cb.getInstructionAlignment.__class__(self._get_instruction_alignment)
+ self._cb.getMaxInstructionLength = self._cb.getMaxInstructionLength.__class__(self._get_max_instruction_length)
+ self._cb.getOpcodeDisplayLength = self._cb.getOpcodeDisplayLength.__class__(self._get_opcode_display_length)
+ self._cb.getAssociatedArchitectureByAddress = \
+ self._cb.getAssociatedArchitectureByAddress.__class__(self._get_associated_arch_by_address)
+ self._cb.getInstructionInfo = self._cb.getInstructionInfo.__class__(self._get_instruction_info)
+ self._cb.getInstructionText = self._cb.getInstructionText.__class__(self._get_instruction_text)
+ self._cb.freeInstructionText = self._cb.freeInstructionText.__class__(self._free_instruction_text)
+ self._cb.getInstructionLowLevelIL = self._cb.getInstructionLowLevelIL.__class__(
+ self._get_instruction_low_level_il)
+ self._cb.getRegisterName = self._cb.getRegisterName.__class__(self._get_register_name)
+ self._cb.getFlagName = self._cb.getFlagName.__class__(self._get_flag_name)
+ self._cb.getFlagWriteTypeName = self._cb.getFlagWriteTypeName.__class__(self._get_flag_write_type_name)
+ self._cb.getSemanticFlagClassName = self._cb.getSemanticFlagClassName.__class__(self._get_semantic_flag_class_name)
+ self._cb.getSemanticFlagGroupName = self._cb.getSemanticFlagGroupName.__class__(self._get_semantic_flag_group_name)
+ self._cb.getFullWidthRegisters = self._cb.getFullWidthRegisters.__class__(self._get_full_width_registers)
+ self._cb.getAllRegisters = self._cb.getAllRegisters.__class__(self._get_all_registers)
+ self._cb.getAllFlags = self._cb.getAllFlags.__class__(self._get_all_flags)
+ self._cb.getAllFlagWriteTypes = self._cb.getAllFlagWriteTypes.__class__(self._get_all_flag_write_types)
+ self._cb.getAllSemanticFlagClasses = self._cb.getAllSemanticFlagClasses.__class__(self._get_all_semantic_flag_classes)
+ self._cb.getAllSemanticFlagGroups = self._cb.getAllSemanticFlagGroups.__class__(self._get_all_semantic_flag_groups)
+ self._cb.getFlagRole = self._cb.getFlagRole.__class__(self._get_flag_role)
+ self._cb.getFlagsRequiredForFlagCondition = self._cb.getFlagsRequiredForFlagCondition.__class__(
+ self._get_flags_required_for_flag_condition)
+ self._cb.getFlagsRequiredForSemanticFlagGroup = self._cb.getFlagsRequiredForSemanticFlagGroup.__class__(
+ self._get_flags_required_for_semantic_flag_group)
+ self._cb.getFlagConditionsForSemanticFlagGroup = self._cb.getFlagConditionsForSemanticFlagGroup.__class__(
+ self._get_flag_conditions_for_semantic_flag_group)
+ self._cb.freeFlagConditionsForSemanticFlagGroup = self._cb.freeFlagConditionsForSemanticFlagGroup.__class__(
+ self._free_flag_conditions_for_semantic_flag_group)
+ self._cb.getFlagsWrittenByFlagWriteType = self._cb.getFlagsWrittenByFlagWriteType.__class__(
+ self._get_flags_written_by_flag_write_type)
+ self._cb.getSemanticClassForFlagWriteType = self._cb.getSemanticClassForFlagWriteType.__class__(
+ self._get_semantic_class_for_flag_write_type)
+ self._cb.getFlagWriteLowLevelIL = self._cb.getFlagWriteLowLevelIL.__class__(
+ self._get_flag_write_low_level_il)
+ self._cb.getFlagConditionLowLevelIL = self._cb.getFlagConditionLowLevelIL.__class__(
+ self._get_flag_condition_low_level_il)
+ self._cb.getSemanticFlagGroupLowLevelIL = self._cb.getSemanticFlagGroupLowLevelIL.__class__(
+ self._get_semantic_flag_group_low_level_il)
+ self._cb.freeRegisterList = self._cb.freeRegisterList.__class__(self._free_register_list)
+ self._cb.getRegisterInfo = self._cb.getRegisterInfo.__class__(self._get_register_info)
+ self._cb.getStackPointerRegister = self._cb.getStackPointerRegister.__class__(
+ self._get_stack_pointer_register)
+ self._cb.getLinkRegister = self._cb.getLinkRegister.__class__(self._get_link_register)
+ self._cb.getGlobalRegisters = self._cb.getGlobalRegisters.__class__(self._get_global_registers)
+ self._cb.getRegisterStackName = self._cb.getRegisterStackName.__class__(self._get_register_stack_name)
+ self._cb.getAllRegisterStacks = self._cb.getAllRegisterStacks.__class__(self._get_all_register_stacks)
+ self._cb.getRegisterStackInfo = self._cb.getRegisterStackInfo.__class__(self._get_register_stack_info)
+ self._cb.getIntrinsicName = self._cb.getIntrinsicName.__class__(self._get_intrinsic_name)
+ self._cb.getAllIntrinsics = self._cb.getAllIntrinsics.__class__(self._get_all_intrinsics)
+ self._cb.getIntrinsicInputs = self._cb.getIntrinsicInputs.__class__(self._get_intrinsic_inputs)
+ self._cb.freeNameAndTypeList = self._cb.freeNameAndTypeList.__class__(self._free_name_and_type_list)
+ self._cb.getIntrinsicOutputs = self._cb.getIntrinsicOutputs.__class__(self._get_intrinsic_outputs)
+ self._cb.freeTypeList = self._cb.freeTypeList.__class__(self._free_type_list)
+ self._cb.assemble = self._cb.assemble.__class__(self._assemble)
+ self._cb.isNeverBranchPatchAvailable = self._cb.isNeverBranchPatchAvailable.__class__(
+ self._is_never_branch_patch_available)
+ self._cb.isAlwaysBranchPatchAvailable = self._cb.isAlwaysBranchPatchAvailable.__class__(
+ self._is_always_branch_patch_available)
+ self._cb.isInvertBranchPatchAvailable = self._cb.isInvertBranchPatchAvailable.__class__(
+ self._is_invert_branch_patch_available)
+ self._cb.isSkipAndReturnZeroPatchAvailable = self._cb.isSkipAndReturnZeroPatchAvailable.__class__(
+ self._is_skip_and_return_zero_patch_available)
+ self._cb.isSkipAndReturnValuePatchAvailable = self._cb.isSkipAndReturnValuePatchAvailable.__class__(
+ self._is_skip_and_return_value_patch_available)
+ self._cb.convertToNop = self._cb.convertToNop.__class__(self._convert_to_nop)
+ self._cb.alwaysBranch = self._cb.alwaysBranch.__class__(self._always_branch)
+ self._cb.invertBranch = self._cb.invertBranch.__class__(self._invert_branch)
+ self._cb.skipAndReturnValue = self._cb.skipAndReturnValue.__class__(self._skip_and_return_value)
+
+ self.__dict__["endianness"] = self.__class__.endianness
+ self.__dict__["address_size"] = self.__class__.address_size
+ self.__dict__["default_int_size"] = self.__class__.default_int_size
+ self.__dict__["instr_alignment"] = self.__class__.instr_alignment
+ self.__dict__["max_instr_length"] = self.__class__.max_instr_length
+ self.__dict__["opcode_display_length"] = self.__class__.opcode_display_length
+ self.__dict__["stack_pointer"] = self.__class__.stack_pointer
+ self.__dict__["link_reg"] = self.__class__.link_reg
+
+ self._all_regs = {}
+ self._full_width_regs = {}
+ self._regs_by_index = {}
+ self.__dict__["regs"] = self.__class__.regs
+ reg_index = 0
+
+ # Registers used for storage in register stacks must be sequential, so allocate these in order first
+ self._all_reg_stacks = {}
+ self._reg_stacks_by_index = {}
+ self.__dict__["reg_stacks"] = self.__class__.reg_stacks
+ reg_stack_index = 0
+ for reg_stack in self.reg_stacks:
+ info = self.reg_stacks[reg_stack]
+ for reg in info.storage_regs:
+ self._all_regs[reg] = reg_index
+ self._regs_by_index[reg_index] = reg
+ self.regs[reg].index = reg_index
+ reg_index += 1
+ for reg in info.top_relative_regs:
+ self._all_regs[reg] = reg_index
+ self._regs_by_index[reg_index] = reg
+ self.regs[reg].index = reg_index
+ reg_index += 1
+ if reg_stack not in self._all_reg_stacks:
+ self._all_reg_stacks[reg_stack] = reg_stack_index
+ self._reg_stacks_by_index[reg_stack_index] = reg_stack
+ self.reg_stacks[reg_stack].index = reg_stack_index
+ reg_stack_index += 1
+
+ for reg in self.regs:
+ info = self.regs[reg]
+ if reg not in self._all_regs:
+ self._all_regs[reg] = reg_index
+ self._regs_by_index[reg_index] = reg
+ self.regs[reg].index = reg_index
+ reg_index += 1
+ if info.full_width_reg not in self._all_regs:
+ self._all_regs[info.full_width_reg] = reg_index
+ self._regs_by_index[reg_index] = info.full_width_reg
+ self.regs[info.full_width_reg].index = reg_index
+ reg_index += 1
+ if info.full_width_reg not in self._full_width_regs:
+ self._full_width_regs[info.full_width_reg] = self._all_regs[info.full_width_reg]
+
+ self._flags = {}
+ self._flags_by_index = {}
+ self.__dict__["flags"] = self.__class__.flags
+ flag_index = 0
+ for flag in self.__class__.flags:
+ if flag not in self._flags:
+ self._flags[flag] = flag_index
+ self._flags_by_index[flag_index] = flag
+ flag_index += 1
+
+ self._flag_write_types = {}
+ self._flag_write_types_by_index = {}
+ self.__dict__["flag_write_types"] = self.__class__.flag_write_types
+ write_type_index = 0
+ for write_type in self.__class__.flag_write_types:
+ if write_type not in self._flag_write_types:
+ self._flag_write_types[write_type] = write_type_index
+ self._flag_write_types_by_index[write_type_index] = write_type
+ write_type_index += 1
+
+ self._semantic_flag_classes = {}
+ self._semantic_flag_classes_by_index = {}
+ self.__dict__["semantic_flag_classes"] = self.__class__.semantic_flag_classes
+ semantic_class_index = 1 # index 0 is reserved for the default semantic class
+ for sem_class in self.__class__.semantic_flag_classes:
+ if sem_class not in self._semantic_flag_classes:
+ self._semantic_flag_classes[sem_class] = semantic_class_index
+ self._semantic_flag_classes_by_index[semantic_class_index] = sem_class
+ semantic_class_index += 1
+
+ self._semantic_flag_groups = {}
+ self._semantic_flag_groups_by_index = {}
+ self.__dict__["semantic_flag_groups"] = self.__class__.semantic_flag_groups
+ semantic_group_index = 0
+ for sem_group in self.__class__.semantic_flag_groups:
+ if sem_group not in self._semantic_flag_groups:
+ self._semantic_flag_groups[sem_group] = semantic_group_index
+ self._semantic_flag_groups_by_index[semantic_group_index] = sem_group
+ semantic_group_index += 1
+
+ self._flag_roles = {}
+ self.__dict__["flag_roles"] = self.__class__.flag_roles
+ for flag in self.__class__.flag_roles:
+ role = self.__class__.flag_roles[flag]
+ if isinstance(role, str):
+ role = FlagRole[role]
+ self._flag_roles[self._flags[flag]] = role
+
+ self.__dict__["flags_required_for_flag_condition"] = self.__class__.flags_required_for_flag_condition
+
+ self._flags_required_by_semantic_flag_group = {}
+ self.__dict__["flags_required_for_semantic_flag_group"] = self.__class__.flags_required_for_semantic_flag_group
+ for group in self.__class__.flags_required_for_semantic_flag_group:
+ flags = []
+ for flag in self.__class__.flags_required_for_semantic_flag_group[group]:
+ flags.append(self._flags[flag])
+ self._flags_required_by_semantic_flag_group[self._semantic_flag_groups[group]] = flags
+
+ self._flag_conditions_for_semantic_flag_group = {}
+ self.__dict__["flag_conditions_for_semantic_flag_group"] = self.__class__.flag_conditions_for_semantic_flag_group
+ for group in self.__class__.flag_conditions_for_semantic_flag_group:
+ class_cond = {}
+ for sem_class in self.__class__.flag_conditions_for_semantic_flag_group[group]:
+ if sem_class is None:
+ class_cond[0] = self.__class__.flag_conditions_for_semantic_flag_group[group][sem_class]
+ else:
+ class_cond[self._semantic_flag_classes[sem_class]] = self.__class__.flag_conditions_for_semantic_flag_group[group][sem_class]
+ self._flag_conditions_for_semantic_flag_group[self._semantic_flag_groups[group]] = class_cond
+
+ self._flags_written_by_flag_write_type = {}
+ self.__dict__["flags_written_by_flag_write_type"] = self.__class__.flags_written_by_flag_write_type
+ for write_type in self.__class__.flags_written_by_flag_write_type:
+ flags = []
+ for flag in self.__class__.flags_written_by_flag_write_type[write_type]:
+ flags.append(self._flags[flag])
+ self._flags_written_by_flag_write_type[self._flag_write_types[write_type]] = flags
+
+ self._semantic_class_for_flag_write_type = {}
+ self.__dict__["semantic_class_for_flag_write_type"] = self.__class__.semantic_class_for_flag_write_type
+ for write_type in self.__class__.semantic_class_for_flag_write_type:
+ sem_class = self.__class__.semantic_class_for_flag_write_type[write_type]
+ if sem_class in self._semantic_flag_classes:
+ sem_class_index = self._semantic_flag_classes[sem_class]
+ else:
+ sem_class_index = 0
+ self._semantic_class_for_flag_write_type[self._flag_write_types[write_type]] = sem_class_index
+
+ self.__dict__["global_regs"] = self.__class__.global_regs
+
+ self._intrinsics = {}
+ self._intrinsics_by_index = {}
+ self.__dict__["intrinsics"] = self.__class__.intrinsics
+ intrinsic_index = 0
+ for intrinsic in self.__class__.intrinsics.keys():
+ if intrinsic not in self._intrinsics:
+ info = self.__class__.intrinsics[intrinsic]
+ for i in range(0, len(info.inputs)):
+ if isinstance(info.inputs[i], types.Type):
+ info.inputs[i] = binaryninja.function.IntrinsicInput(info.inputs[i])
+ elif isinstance(info.inputs[i], tuple):
+ info.inputs[i] = binaryninja.function.IntrinsicInput(info.inputs[i][0], info.inputs[i][1])
+ info.index = intrinsic_index
+ self._intrinsics[intrinsic] = intrinsic_index
+ self._intrinsics_by_index[intrinsic_index] = (intrinsic, info)
+ intrinsic_index += 1
+
+ self._pending_reg_lists = {}
+ self._pending_token_lists = {}
+ self._pending_condition_lists = {}
+ self._pending_name_and_type_lists = {}
+ self._pending_type_lists = {}
+
+ def __eq__(self, value):
+ if not isinstance(value, Architecture):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Architecture):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+ @property
+ def full_width_regs(self):
+ """List of full width register strings (read-only)"""
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetFullWidthArchitectureRegisters(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(core.BNGetArchitectureRegisterName(self.handle, regs[i]))
+ core.BNFreeRegisterList(regs)
+ return result
+
+ @property
+ def calling_conventions(self):
+ """Dict of CallingConvention objects (read-only)"""
+ count = ctypes.c_ulonglong()
+ cc = core.BNGetArchitectureCallingConventions(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ obj = callingconvention.CallingConvention(handle=core.BNNewCallingConventionReference(cc[i]))
+ result[obj.name] = obj
+ core.BNFreeCallingConventionList(cc, count)
+ return result
+
+ @property
+ def standalone_platform(self):
+ """Architecture standalone platform (read-only)"""
+ pl = core.BNGetArchitectureStandalonePlatform(self.handle)
+ return platform.Platform(self, pl)
+
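+ # For example (illustrative), with ``arch = Architecture['x86']``:
+ #
+ #     arch.full_width_regs      # e.g. ['eax', 'ecx', 'edx', ...]
+ #     arch.calling_conventions  # dict keyed by name, e.g. 'cdecl'
+ #     arch.standalone_platform  # a standalone Platform for this architecture
+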
+ def __setattr__(self, name, value):
+ if ((name == "name") or (name == "endianness") or (name == "address_size") or
+ (name == "default_int_size") or (name == "regs") or (name == "get_max_instruction_length") or
+ (name == "get_instruction_alignment")):
+ raise AttributeError("attribute '%s' is read only" % name)
+ else:
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ return "<arch: %s>" % self.name
+
+ def _init(self, ctxt, handle):
+ self.handle = handle
+
+ def _get_endianness(self, ctxt):
+ try:
+ return self.endianness
+ except:
+ log.log_error(traceback.format_exc())
+ return Endianness.LittleEndian
+
+ def _get_address_size(self, ctxt):
+ try:
+ return self.address_size
+ except:
+ log.log_error(traceback.format_exc())
+ return 8
+
+ def _get_default_integer_size(self, ctxt):
+ try:
+ return self.default_int_size
+ except:
+ log.log_error(traceback.format_exc())
+ return 4
+
+ def _get_instruction_alignment(self, ctxt):
+ try:
+ return self.instr_alignment
+ except:
+ log.log_error(traceback.format_exc())
+ return 1
+
+ def _get_max_instruction_length(self, ctxt):
+ try:
+ return self.max_instr_length
+ except:
+ log.log_error(traceback.format_exc())
+ return 16
+
+ def _get_opcode_display_length(self, ctxt):
+ try:
+ return self.opcode_display_length
+ except:
+ log.log_error(traceback.format_exc())
+ return 8
+
+ def _get_associated_arch_by_address(self, ctxt, addr):
+ try:
+ result, new_addr = self.get_associated_arch_by_address(addr[0])
+ addr[0] = new_addr
+ return ctypes.cast(result.handle, ctypes.c_void_p).value
+ except:
+ log.log_error(traceback.format_exc())
+ return ctypes.cast(self.handle, ctypes.c_void_p).value
+
+ def _get_instruction_info(self, ctxt, data, addr, max_len, result):
+ try:
+ buf = ctypes.create_string_buffer(max_len)
+ ctypes.memmove(buf, data, max_len)
+ info = self.get_instruction_info(buf.raw, addr)
+ if info is None:
+ return False
+ result[0].length = info.length
+ result[0].archTransitionByTargetAddr = info.arch_transition_by_target_addr
+ result[0].branchDelay = info.branch_delay
+ result[0].branchCount = len(info.branches)
+ for i in range(0, len(info.branches)):
+ if isinstance(info.branches[i].type, str):
+ result[0].branchType[i] = BranchType[info.branches[i].type]
+ else:
+ result[0].branchType[i] = info.branches[i].type
+ result[0].branchTarget[i] = info.branches[i].target
+ if info.branches[i].arch is None:
+ result[0].branchArch[i] = None
+ else:
+ result[0].branchArch[i] = info.branches[i].arch.handle
+ return True
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_instruction_text(self, ctxt, data, addr, length, result, count):
+ try:
+ buf = ctypes.create_string_buffer(length[0])
+ ctypes.memmove(buf, data, length[0])
+ info = self.get_instruction_text(buf.raw, addr)
+ if info is None:
+ return False
+ tokens = info[0]
+ length[0] = info[1]
+ count[0] = len(tokens)
+ token_buf = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens)
+ result[0] = token_buf
+ ptr = ctypes.cast(token_buf, ctypes.c_void_p)
+ self._pending_token_lists[ptr.value] = (ptr.value, token_buf)
+ return True
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _free_instruction_text(self, tokens, count):
+ try:
+ buf = ctypes.cast(tokens, ctypes.c_void_p)
+ if buf.value not in self._pending_token_lists:
+ raise ValueError("freeing token list that wasn't allocated")
+ del self._pending_token_lists[buf.value]
+ except KeyError:
+ log.log_error(traceback.format_exc())
+
+ def _get_instruction_low_level_il(self, ctxt, data, addr, length, il):
+ try:
+ buf = ctypes.create_string_buffer(length[0])
+ ctypes.memmove(buf, data, length[0])
+ result = self.get_instruction_low_level_il(buf.raw, addr,
+ lowlevelil.LowLevelILFunction(self, core.BNNewLowLevelILFunctionReference(il)))
+ if result is None:
+ return False
+ length[0] = result
+ return True
+ except OSError:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_register_name(self, ctxt, reg):
+ try:
+ if reg in self._regs_by_index:
+ return core.BNAllocString(self._regs_by_index[reg])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_flag_name(self, ctxt, flag):
+ try:
+ if flag in self._flags_by_index:
+ return core.BNAllocString(self._flags_by_index[flag])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_flag_write_type_name(self, ctxt, write_type):
+ try:
+ if write_type in self._flag_write_types_by_index:
+ return core.BNAllocString(self._flag_write_types_by_index[write_type])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_semantic_flag_class_name(self, ctxt, sem_class):
+ try:
+ if sem_class in self._semantic_flag_classes_by_index:
+ return core.BNAllocString(self._semantic_flag_classes_by_index[sem_class])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_semantic_flag_group_name(self, ctxt, sem_group):
+ try:
+ if sem_group in self._semantic_flag_groups_by_index:
+ return core.BNAllocString(self._semantic_flag_groups_by_index[sem_group])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_full_width_registers(self, ctxt, count):
+ try:
+ regs = list(self._full_width_regs.values())
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = regs[i]
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_all_registers(self, ctxt, count):
+ try:
+ regs = list(self._regs_by_index.keys())
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = regs[i]
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_all_flags(self, ctxt, count):
+ try:
+ flags = list(self._flags_by_index.keys())
+ count[0] = len(flags)
+ flag_buf = (ctypes.c_uint * len(flags))()
+ for i in range(0, len(flags)):
+ flag_buf[i] = flags[i]
+ result = ctypes.cast(flag_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, flag_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_all_flag_write_types(self, ctxt, count):
+ try:
+ write_types = list(self._flag_write_types_by_index.keys())
+ count[0] = len(write_types)
+ type_buf = (ctypes.c_uint * len(write_types))()
+ for i in range(0, len(write_types)):
+ type_buf[i] = write_types[i]
+ result = ctypes.cast(type_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, type_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_all_semantic_flag_classes(self, ctxt, count):
+ try:
+ sem_classes = list(self._semantic_flag_classes_by_index.keys())
+ count[0] = len(sem_classes)
+ class_buf = (ctypes.c_uint * len(sem_classes))()
+ for i in range(0, len(sem_classes)):
+ class_buf[i] = sem_classes[i]
+ result = ctypes.cast(class_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, class_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_all_semantic_flag_groups(self, ctxt, count):
+ try:
+ sem_groups = list(self._semantic_flag_groups_by_index.keys())
+ count[0] = len(sem_groups)
+ group_buf = (ctypes.c_uint * len(sem_groups))()
+ for i in range(0, len(sem_groups)):
+ group_buf[i] = sem_groups[i]
+ result = ctypes.cast(group_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, group_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_flag_role(self, ctxt, flag, sem_class):
+ try:
+ if sem_class in self._semantic_flag_classes_by_index:
+ sem_class = self._semantic_flag_classes_by_index[sem_class]
+ else:
+ sem_class = None
+ return self.get_flag_role(flag, sem_class)
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ return FlagRole.SpecialFlagRole
+
+ def _get_flags_required_for_flag_condition(self, ctxt, cond, sem_class, count):
+ try:
+ if sem_class in self._semantic_flag_classes_by_index:
+ sem_class = self._semantic_flag_classes_by_index[sem_class]
+ else:
+ sem_class = None
+ flag_names = self.get_flags_required_for_flag_condition(cond, sem_class)
+ flags = []
+ for name in flag_names:
+ flags.append(self._flags[name])
+ count[0] = len(flags)
+ flag_buf = (ctypes.c_uint * len(flags))()
+ for i in range(0, len(flags)):
+ flag_buf[i] = flags[i]
+ result = ctypes.cast(flag_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, flag_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_flags_required_for_semantic_flag_group(self, ctxt, sem_group, count):
+ try:
+ if sem_group in self._flags_required_by_semantic_flag_group:
+ flags = self._flags_required_by_semantic_flag_group[sem_group]
+ else:
+ flags = []
+ count[0] = len(flags)
+ flag_buf = (ctypes.c_uint * len(flags))()
+ for i in range(0, len(flags)):
+ flag_buf[i] = flags[i]
+ result = ctypes.cast(flag_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, flag_buf)
+ return result.value
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_flag_conditions_for_semantic_flag_group(self, ctxt, sem_group, count):
+ try:
+ if sem_group in self._flag_conditions_for_semantic_flag_group:
+ class_cond = self._flag_conditions_for_semantic_flag_group[sem_group]
+ else:
+ class_cond = {}
+ count[0] = len(class_cond)
+ cond_buf = (core.BNFlagConditionForSemanticClass * len(class_cond))()
+ i = 0
+ for class_index in class_cond.keys():
+ cond_buf[i].semanticClass = class_index
+ cond_buf[i].condition = class_cond[class_index]
+ i += 1
+ result = ctypes.cast(cond_buf, ctypes.c_void_p)
+ self._pending_condition_lists[result.value] = (result, cond_buf)
+ return result.value
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _free_flag_conditions_for_semantic_flag_group(self, ctxt, conditions):
+ try:
+ buf = ctypes.cast(conditions, ctypes.c_void_p)
+ if buf.value not in self._pending_condition_lists:
+ raise ValueError("freeing condition list that wasn't allocated")
+ del self._pending_condition_lists[buf.value]
+ except (ValueError, KeyError):
+ log.log_error(traceback.format_exc())
+
+ def _get_flags_written_by_flag_write_type(self, ctxt, write_type, count):
+ try:
+ if write_type in self._flags_written_by_flag_write_type:
+ flags = self._flags_written_by_flag_write_type[write_type]
+ else:
+ flags = []
+ count[0] = len(flags)
+ flag_buf = (ctypes.c_uint * len(flags))()
+ for i in range(0, len(flags)):
+ flag_buf[i] = flags[i]
+ result = ctypes.cast(flag_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, flag_buf)
+ return result.value
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_semantic_class_for_flag_write_type(self, ctxt, write_type):
+ try:
+ if write_type in self._semantic_class_for_flag_write_type:
+ return self._semantic_class_for_flag_write_type[write_type]
+ else:
+ return 0
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_flag_write_low_level_il(self, ctxt, op, size, write_type, flag, operands, operand_count, il):
+ try:
+ write_type_name = None
+ if write_type != 0:
+ write_type_name = self._flag_write_types_by_index[write_type]
+ flag_name = self._flags_by_index[flag]
+ operand_list = []
+ for i in range(operand_count):
+ if operands[i].constant:
+ operand_list.append(operands[i].value)
+ else:
+ # temp registers (LLIL_REG_IS_TEMP) and architecture registers are both wrapped as ILRegister
+ operand_list.append(lowlevelil.ILRegister(self, operands[i].reg))
+ return self.get_flag_write_low_level_il(op, size, write_type_name, flag_name, operand_list,
+ lowlevelil.LowLevelILFunction(self, core.BNNewLowLevelILFunctionReference(il))).index
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_flag_condition_low_level_il(self, ctxt, cond, sem_class, il):
+ try:
+ if sem_class in self._semantic_flag_classes_by_index:
+ sem_class_name = self._semantic_flag_classes_by_index[sem_class]
+ else:
+ sem_class_name = None
+ return self.get_flag_condition_low_level_il(cond, sem_class_name,
+ lowlevelil.LowLevelILFunction(self, core.BNNewLowLevelILFunctionReference(il))).index
+ except OSError:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_semantic_flag_group_low_level_il(self, ctxt, sem_group, il):
+ try:
+ if sem_group in self._semantic_flag_groups_by_index:
+ sem_group_name = self._semantic_flag_groups_by_index[sem_group]
+ else:
+ sem_group_name = None
+ return self.get_semantic_flag_group_low_level_il(sem_group_name,
+ lowlevelil.LowLevelILFunction(self, core.BNNewLowLevelILFunctionReference(il))).index
+ except OSError:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _free_register_list(self, ctxt, regs):
+ try:
+ buf = ctypes.cast(regs, ctypes.c_void_p)
+ if buf.value not in self._pending_reg_lists:
+ raise ValueError("freeing register list that wasn't allocated")
+ del self._pending_reg_lists[buf.value]
+ except (ValueError, KeyError):
+ log.log_error(traceback.format_exc())
+
+ def _get_register_info(self, ctxt, reg, result):
+ try:
+ if reg not in self._regs_by_index:
+ result[0].fullWidthRegister = 0
+ result[0].offset = 0
+ result[0].size = 0
+ result[0].extend = ImplicitRegisterExtend.NoExtend
+ return
+ info = self.regs[self._regs_by_index[reg]]
+ result[0].fullWidthRegister = self._all_regs[info.full_width_reg]
+ result[0].offset = info.offset
+ result[0].size = info.size
+ if isinstance(info.extend, str):
+ result[0].extend = ImplicitRegisterExtend[info.extend]
+ else:
+ result[0].extend = info.extend
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ result[0].fullWidthRegister = 0
+ result[0].offset = 0
+ result[0].size = 0
+ result[0].extend = ImplicitRegisterExtend.NoExtend
+
+ def _get_stack_pointer_register(self, ctxt):
+ try:
+ return self._all_regs[self.stack_pointer]
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_link_register(self, ctxt):
+ try:
+ if self.link_reg is None:
+ return 0xffffffff
+ return self._all_regs[self.link_reg]
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_global_registers(self, ctxt, count):
+ try:
+ count[0] = len(self.global_regs)
+ reg_buf = (ctypes.c_uint * len(self.global_regs))()
+ for i in range(0, len(self.global_regs)):
+ reg_buf[i] = self._all_regs[self.global_regs[i]]
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_register_stack_name(self, ctxt, reg_stack):
+ try:
+ if reg_stack in self._reg_stacks_by_index:
+ return core.BNAllocString(self._reg_stacks_by_index[reg_stack])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_all_register_stacks(self, ctxt, count):
+ try:
+ regs = list(self._reg_stacks_by_index.keys())
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = regs[i]
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_register_stack_info(self, ctxt, reg_stack, result):
+ try:
+ if reg_stack not in self._reg_stacks_by_index:
+ result[0].firstStorageReg = 0
+ result[0].firstTopRelativeReg = 0
+ result[0].storageCount = 0
+ result[0].topRelativeCount = 0
+ result[0].stackTopReg = 0
+ return
+ info = self.reg_stacks[self._reg_stacks_by_index[reg_stack]]
+ result[0].firstStorageReg = self._all_regs[info.storage_regs[0]]
+ result[0].storageCount = len(info.storage_regs)
+ if len(info.top_relative_regs) > 0:
+ result[0].firstTopRelativeReg = self._all_regs[info.top_relative_regs[0]]
+ result[0].topRelativeCount = len(info.top_relative_regs)
+ else:
+ result[0].firstTopRelativeReg = 0
+ result[0].topRelativeCount = 0
+ result[0].stackTopReg = self._all_regs[info.stack_top_reg]
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ result[0].firstStorageReg = 0
+ result[0].firstTopRelativeReg = 0
+ result[0].storageCount = 0
+ result[0].topRelativeCount = 0
+ result[0].stackTopReg = 0
+
+ def _get_intrinsic_name(self, ctxt, intrinsic):
+ try:
+ if intrinsic in self._intrinsics_by_index:
+ return core.BNAllocString(self._intrinsics_by_index[intrinsic][0])
+ return core.BNAllocString("")
+ except (KeyError, OSError):
+ log.log_error(traceback.format_exc())
+ return core.BNAllocString("")
+
+ def _get_all_intrinsics(self, ctxt, count):
+ try:
+ regs = list(self._intrinsics_by_index.keys())
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = regs[i]
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except KeyError:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_intrinsic_inputs(self, ctxt, intrinsic, count):
+ try:
+ if intrinsic in self._intrinsics_by_index:
+ inputs = self._intrinsics_by_index[intrinsic][1].inputs
+ count[0] = len(inputs)
+ input_buf = (core.BNNameAndType * len(inputs))()
+ for i in range(0, len(inputs)):
+ input_buf[i].name = inputs[i].name
+ input_buf[i].type = core.BNNewTypeReference(inputs[i].type.handle)
+ input_buf[i].typeConfidence = inputs[i].type.confidence
+ result = ctypes.cast(input_buf, ctypes.c_void_p)
+ self._pending_name_and_type_lists[result.value] = (result, input_buf, len(inputs))
+ return result.value
+ count[0] = 0
+ return None
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _free_name_and_type_list(self, ctxt, buf_raw, length):
+ try:
+ buf = ctypes.cast(buf_raw, ctypes.c_void_p)
+ if buf.value not in self._pending_name_and_type_lists:
+ raise ValueError("freeing name and type list that wasn't allocated")
+ name_and_types = self._pending_name_and_type_lists[buf.value][1]
+ count = self._pending_name_and_type_lists[buf.value][2]
+ for i in range(0, count):
+ core.BNFreeType(name_and_types[i].type)
+ del self._pending_name_and_type_lists[buf.value]
+ except (ValueError, KeyError):
+ log.log_error(traceback.format_exc())
+
+ def _get_intrinsic_outputs(self, ctxt, intrinsic, count):
+ try:
+ if intrinsic in self._intrinsics_by_index:
+ outputs = self._intrinsics_by_index[intrinsic][1].outputs
+ count[0] = len(outputs)
+ output_buf = (core.BNTypeWithConfidence * len(outputs))()
+ for i in range(0, len(outputs)):
+ output_buf[i].type = core.BNNewTypeReference(outputs[i].handle)
+ output_buf[i].confidence = outputs[i].confidence
+ result = ctypes.cast(output_buf, ctypes.c_void_p)
+ self._pending_type_lists[result.value] = (result, output_buf, len(outputs))
+ return result.value
+ count[0] = 0
+ return None
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _free_type_list(self, ctxt, buf_raw, length):
+ try:
+ buf = ctypes.cast(buf_raw, ctypes.c_void_p)
+ if buf.value not in self._pending_type_lists:
+ raise ValueError("freeing type list that wasn't allocated")
+ types = self._pending_type_lists[buf.value][1]
+ count = self._pending_type_lists[buf.value][2]
+ for i in range(0, count):
+ core.BNFreeType(types[i].type)
+ del self._pending_type_lists[buf.value]
+ except (ValueError, KeyError):
+ log.log_error(traceback.format_exc())
+
+ def _assemble(self, ctxt, code, addr, result, errors):
+ try:
+ data, error_str = self.assemble(code, addr)
+ errors[0] = core.BNAllocString(str(error_str))
+ if data is None:
+ return False
+ buf = ctypes.create_string_buffer(len(data))
+ ctypes.memmove(buf, data, len(data))
+ core.BNSetDataBufferContents(result, buf, len(data))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ errors[0] = core.BNAllocString("Unhandled exception during assembly.\n")
+ return False
+
+ def _is_never_branch_patch_available(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ return self.is_never_branch_patch_available(buf.raw, addr)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_always_branch_patch_available(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ return self.is_always_branch_patch_available(buf.raw, addr)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_invert_branch_patch_available(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ return self.is_invert_branch_patch_available(buf.raw, addr)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_skip_and_return_zero_patch_available(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ return self.is_skip_and_return_zero_patch_available(buf.raw, addr)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_skip_and_return_value_patch_available(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ return self.is_skip_and_return_value_patch_available(buf.raw, addr)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _convert_to_nop(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ result = self.convert_to_nop(buf.raw, addr)
+ if result is None:
+ return False
+ result = str(result)
+ if len(result) > length:
+ result = result[0:length]
+ ctypes.memmove(data, result, len(result))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _always_branch(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ result = self.always_branch(buf.raw, addr)
+ if result is None:
+ return False
+ result = str(result)
+ if len(result) > length:
+ result = result[0:length]
+ ctypes.memmove(data, result, len(result))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _invert_branch(self, ctxt, data, addr, length):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ result = self.invert_branch(buf.raw, addr)
+ if result is None:
+ return False
+ result = str(result)
+ if len(result) > length:
+ result = result[0:length]
+ ctypes.memmove(data, result, len(result))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _skip_and_return_value(self, ctxt, data, addr, length, value):
+ try:
+ buf = ctypes.create_string_buffer(length)
+ ctypes.memmove(buf, data, length)
+ result = self.skip_and_return_value(buf.raw, addr, value)
+ if result is None:
+ return False
+ result = str(result)
+ if len(result) > length:
+ result = result[0:length]
+ ctypes.memmove(data, result, len(result))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+[docs] def perform_get_associated_arch_by_address(self, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_associated_arch_by_address``.
+ """
+ return self, addr
+
+[docs] @abc.abstractmethod
+ def perform_get_instruction_info(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_instruction_info``.
+
+ :param str data: bytes to decode
+ :param int addr: virtual address of the byte to be decoded
+ :return: a :py:class:`InstructionInfo` object containing the length and branch types for the given instruction
+ :rtype: InstructionInfo
+ """
+ raise NotImplementedError
+
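+ # Illustrative sketch of the non-deprecated ``get_instruction_info`` override
+ # for a hypothetical 2-byte ISA whose 0xc3 opcode is a return:
+ #
+ #     def get_instruction_info(self, data, addr):
+ #         info = InstructionInfo()
+ #         info.length = 2
+ #         if data[0:1] == b"\xc3":
+ #             info.add_branch(BranchType.FunctionReturn)
+ #         return info
+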
+[docs] @abc.abstractmethod
+ def perform_get_instruction_text(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_instruction_text``.
+
+ :param str data: bytes to decode
+ :param int addr: virtual address of the byte to be decoded
+ :return: a tuple of list(InstructionTextToken) and length of instruction decoded
+ :rtype: tuple(list(InstructionTextToken), int)
+ """
+ raise NotImplementedError
+
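+ # Illustrative sketch of the corresponding ``get_instruction_text`` override,
+ # again assuming a fixed 2-byte instruction width:
+ #
+ #     def get_instruction_text(self, data, addr):
+ #         tokens = [InstructionTextToken(
+ #             InstructionTextTokenType.InstructionToken, "nop")]
+ #         return tokens, 2
+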
+[docs] @abc.abstractmethod
+ def perform_get_instruction_low_level_il(self, data, addr, il):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_instruction_low_level_il``.
+
+ :param str data: bytes to be interpreted as low-level IL instructions
+ :param int addr: virtual address of start of ``data``
+ :param LowLevelILFunction il: LowLevelILFunction object to append LowLevelILExpr objects to
+ :return: length of the bytes read on success, None on failure
+ :rtype: int or None
+ """
+ raise NotImplementedError
+
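+ # Illustrative lifting sketch: append IL expressions to ``il`` and return the
+ # number of bytes consumed (2 here is an assumption for the same toy ISA):
+ #
+ #     def get_instruction_low_level_il(self, data, addr, il):
+ #         il.append(il.nop())
+ #         return 2
+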
+[docs] @abc.abstractmethod
+ def perform_get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_flag_write_low_level_il``.
+
+ :param LowLevelILOperation op:
+ :param int size:
+ :param int write_type:
+ :param int flag:
+ :param list(int_or_str) operands:
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ flag = self.get_flag_index(flag)
+ if flag not in self._flag_roles:
+ return il.unimplemented()
+ return self.get_default_flag_write_low_level_il(op, size, self._flag_roles[flag], operands, il)
+
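+ # Illustrative class-level flag configuration this hook relies on (flag and
+ # write-type names are examples only; index 0 of ``flag_write_types`` is
+ # reserved for instructions that do not update flags):
+ #
+ #     flags = ['z', 'c']
+ #     flag_roles = {'z': FlagRole.ZeroFlagRole, 'c': FlagRole.CarryFlagRole}
+ #     flag_write_types = ['', '*']
+ #     flags_written_by_flag_write_type = {'*': ['z', 'c']}
+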
+[docs] @abc.abstractmethod
+ def perform_get_flag_condition_low_level_il(self, cond, sem_class, il):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_flag_condition_low_level_il``.
+
+ :param LowLevelILFlagCondition cond: Flag condition to be computed
+ :param str sem_class: Semantic class to be used (None for default semantics)
+ :param LowLevelILFunction il: LowLevelILFunction object to append LowLevelILExpr objects to
+ :rtype: LowLevelILExpr
+ """
+ return self.get_default_flag_condition_low_level_il(cond, sem_class, il)
+
+[docs] @abc.abstractmethod
+ def perform_get_semantic_flag_group_low_level_il(self, sem_group, il):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_semantic_flag_group_low_level_il``.
+
+ :param str sem_group: Semantic group to be computed
+ :param LowLevelILFunction il: LowLevelILFunction object to append LowLevelILExpr objects to
+ :rtype: LowLevelILExpr
+ """
+ return il.unimplemented()
+
+[docs] @abc.abstractmethod
+ def perform_assemble(self, code, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``assemble``.
+
+ :param str code: string representation of the instructions to be assembled
+ :param int addr: virtual address that the instructions will be loaded at
+ :return: the bytes for the assembled instructions or error string
+ :rtype: (a tuple of the assembled instructions and empty string) or (None and error string)
+ """
+ return None, "Architecture does not implement an assembler.\n"
+
+[docs] @abc.abstractmethod
+ def perform_is_never_branch_patch_available(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``is_never_branch_patch_available``.
+
+ .. warning:: This method should never be called directly.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ """
+ return False
+
+[docs] @abc.abstractmethod
+ def perform_is_always_branch_patch_available(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``is_always_branch_patch_available``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ """
+ return False
+
+[docs] @abc.abstractmethod
+ def perform_is_invert_branch_patch_available(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``is_invert_branch_patch_available``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ """
+ return False
+
+[docs] @abc.abstractmethod
+ def perform_is_skip_and_return_zero_patch_available(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``is_skip_and_return_zero_patch_available``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ """
+ return False
+
+[docs] @abc.abstractmethod
+ def perform_is_skip_and_return_value_patch_available(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``is_skip_and_return_value_patch_available``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ """
+ return False
+
+[docs] @abc.abstractmethod
+ def perform_convert_to_nop(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``convert_to_nop``.
+
+ :param str data: bytes at virtual address ``addr``
+ :param int addr: the virtual address of the instruction to be patched
+ :return: nop sequence of same length as ``data`` or None
+ :rtype: str or None
+ """
+ return None
+
+[docs] @abc.abstractmethod
+ def perform_always_branch(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``always_branch``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: The bytes of the replacement unconditional branch instruction
+ :rtype: str
+ """
+ return None
+
+[docs] @abc.abstractmethod
+ def perform_invert_branch(self, data, addr):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``invert_branch``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: The bytes of the replacement inverted branch instruction
+ :rtype: str
+ """
+ return None
+
+[docs] @abc.abstractmethod
+ def perform_skip_and_return_value(self, data, addr, value):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``skip_and_return_value``.
+
+ :param str data: bytes to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :param int value: value to be returned
+ :return: The bytes of the replacement instruction(s) that return ``value`` instead of calling
+ :rtype: str
+ """
+ return None
+
+[docs] def perform_get_flag_role(self, flag, sem_class):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_flag_role``.
+ """
+ if flag in self._flag_roles:
+ return self._flag_roles[flag]
+ return FlagRole.SpecialFlagRole
+
+[docs] def perform_get_flags_required_for_flag_condition(self, cond, sem_class):
+ """
+ Deprecated method provided for compatibility. Architecture plugins should override ``get_flags_required_for_flag_condition``.
+ """
+ if cond in self.flags_required_for_flag_condition:
+ return self.flags_required_for_flag_condition[cond]
+ return []
+
+[docs] def get_associated_arch_by_address(self, addr):
+ return self.perform_get_associated_arch_by_address(addr)
+
+[docs] def get_instruction_info(self, data, addr):
+ """
+ ``get_instruction_info`` returns an InstructionInfo object for the instruction at the given virtual address
+ ``addr`` with data ``data``.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ .. note :: The instruction info object should always set the InstructionInfo.length to the instruction length, \
+ and the branches of the proper types should be added if the instruction is a branch.
+
+ If the instruction is a branch instruction architecture plugins should add a branch of the proper type:
+
+ ===================== ===================================================
+ BranchType Description
+ ===================== ===================================================
+ UnconditionalBranch Branch will always be taken
+ FalseBranch False branch condition
+ TrueBranch True branch condition
+ CallDestination Branch is a call instruction (Branch with Link)
+ FunctionReturn Branch returns from a function
+ SystemCall System call instruction
+ IndirectBranch Branch destination is a memory address or register
+ UnresolvedBranch Branch destination is an unknown address
+ ===================== ===================================================
+
+ :param str data: max_instruction_length bytes from the binary at virtual address ``addr``
+ :param int addr: virtual address of bytes in ``data``
+ :return: the InstructionInfo for the current instruction
+ :rtype: InstructionInfo
+ """
+ return self.perform_get_instruction_info(data, addr)
+
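+ # A minimal sketch of a ``get_instruction_info`` override for a custom
+ # Architecture subclass, assuming a hypothetical fixed-width 2-byte ISA in
+ # which opcode 0x42 is "unconditional branch with a signed 8-bit offset".
+ # The encoding is invented for illustration; only the InstructionInfo and
+ # BranchType usage mirrors the API described above. In a real plugin this
+ # method body lives inside the subclass.
+ def get_instruction_info(self, data, addr):
+     if len(data) < 2:
+         return None
+     info = binaryninja.function.InstructionInfo()
+     info.length = 2
+     opcode, operand = bytearray(data[0:2])
+     if opcode == 0x42:
+         offset = operand - 0x100 if operand >= 0x80 else operand  # sign-extend imm8
+         info.add_branch(BranchType.UnconditionalBranch, addr + 2 + offset)
+     return info
+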
+[docs] def get_instruction_text(self, data, addr):
+ """
+ ``get_instruction_text`` returns a list of InstructionTextToken objects for the instruction at the given virtual
+ address ``addr`` with data ``data``.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: max_instruction_length bytes from the binary at virtual address ``addr``
+ :param int addr: virtual address of bytes in ``data``
+ :return: an InstructionTextToken list for the current instruction
+ :rtype: list(InstructionTextToken)
+ """
+ return self.perform_get_instruction_text(data, addr)
+
+[docs] def get_instruction_low_level_il_instruction(self, bv, addr):
+ il = lowlevelil.LowLevelILFunction(self)
+ data = bv.read(addr, self.max_instr_length)
+ self.get_instruction_low_level_il(data, addr, il)
+ return il[0]
+
+[docs] def get_instruction_low_level_il(self, data, addr, il):
+ """
+ ``get_instruction_low_level_il`` appends LowLevelILExpr objects to ``il`` for the instruction at the given
+ virtual address ``addr`` with data ``data``.
+
+ This is used to analyze arbitrary data at an address, if you are working with an existing binary, you likely
+ want to be using ``Function.get_low_level_il_at``.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: max_instruction_length bytes from the binary at virtual address ``addr``
+ :param int addr: virtual address of bytes in ``data``
+ :param LowLevelILFunction il: The function the current instruction belongs to
+ :return: the length of the current instruction
+ :rtype: int
+ """
+ return self.perform_get_instruction_low_level_il(data, addr, il)
+
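+ # A companion sketch of ``get_instruction_low_level_il`` for a hypothetical
+ # 2-byte encoding in which opcode 0x01 means "r0 += imm8" on a 4-byte
+ # register named 'r0'. The opcode, register name and sizes are assumptions;
+ # the LowLevelILFunction calls (append, set_reg, reg, add, const,
+ # unimplemented) are the standard IL builders.
+ def get_instruction_low_level_il(self, data, addr, il):
+     if len(data) < 2:
+         return None
+     opcode, imm = bytearray(data[0:2])
+     if opcode == 0x01:
+         il.append(il.set_reg(4, 'r0', il.add(4, il.reg(4, 'r0'), il.const(4, imm))))
+     else:
+         il.append(il.unimplemented())
+     return 2
+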
+[docs] def get_low_level_il_from_bytes(self, data, addr):
+ """
+ ``get_low_level_il_from_bytes`` converts the instruction in bytes to ``il`` at the given virtual address
+
+ :param str data: the bytes of the instruction
+ :param int addr: virtual address of bytes in ``data``
+ :return: the instruction
+ :rtype: LowLevelILInstruction
+ :Example:
+
+ >>> arch.get_low_level_il_from_bytes('\xeb\xfe', 0x40DEAD)
+ <il: jump(0x40dead)>
+ >>>
+ """
+ func = lowlevelil.LowLevelILFunction(self)
+ self.get_instruction_low_level_il(data, addr, func)
+ return func[0]
+
+[docs] def get_reg_name(self, reg):
+ """
+ ``get_reg_name`` gets a register name from a register number.
+
+ :param int reg: register number
+ :return: the corresponding register string
+ :rtype: str
+ """
+ return core.BNGetArchitectureRegisterName(self.handle, reg)
+
+[docs] def get_reg_stack_name(self, reg_stack):
+ """
+ ``get_reg_stack_name`` gets a register stack name from a register stack number.
+
+ :param int reg_stack: register stack number
+ :return: the corresponding register stack string
+ :rtype: str
+ """
+ return core.BNGetArchitectureRegisterStackName(self.handle, reg_stack)
+
+[docs] def get_reg_stack_for_reg(self, reg):
+ reg = self.get_reg_index(reg)
+ result = core.BNGetArchitectureRegisterStackForRegister(self.handle, reg)
+ if result == 0xffffffff:
+ return None
+ return self.get_reg_stack_name(result)
+
+[docs] def get_flag_name(self, flag):
+ """
+ ``get_flag_name`` gets a flag name from a flag number.
+
+ :param int flag: flag number
+ :return: the corresponding flag string
+ :rtype: str
+ """
+ return core.BNGetArchitectureFlagName(self.handle, flag)
+
+[docs] def get_reg_index(self, reg):
+ if isinstance(reg, str):
+ return self.regs[reg].index
+ elif isinstance(reg, lowlevelil.ILRegister):
+ return reg.index
+ return reg
+
+[docs] def get_reg_stack_index(self, reg_stack):
+ if isinstance(reg_stack, str):
+ return self.reg_stacks[reg_stack].index
+ elif isinstance(reg_stack, lowlevelil.ILRegisterStack):
+ return reg_stack.index
+ return reg_stack
+
+[docs] def get_flag_index(self, flag):
+ if isinstance(flag, str):
+ return self._flags[flag]
+ elif isinstance(flag, lowlevelil.ILFlag):
+ return flag.index
+ return flag
+
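+ # Hedged usage sketch of the name/index helpers above. The 'x86' lookup and
+ # the 'z' flag and 'eax' register names are assumptions about the stock x86
+ # plugin; any registered architecture and its own names will do.
+ arch = Architecture['x86']
+ index = arch.get_flag_index('z')       # name (or ILFlag) -> index
+ name = arch.get_flag_name(index)       # index -> name, 'z' again
+ reg_index = arch.get_reg_index('eax')  # register name -> index
+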
+[docs] def get_semantic_flag_class_index(self, sem_class):
+ if sem_class is None:
+ return 0
+ elif isinstance(sem_class, str):
+ return self._semantic_flag_classes[sem_class]
+ elif isinstance(sem_class, lowlevelil.ILSemanticFlagClass):
+ return sem_class.index
+ return sem_class
+
+[docs] def get_semantic_flag_class_name(self, class_index):
+ """
+ ``get_semantic_flag_class_name`` gets the name of a semantic flag class from the index.
+
+ :param int class_index: class index
+ :return: the name of the semantic flag class
+ :rtype: str
+ """
+ if not isinstance(class_index, (int, long)):
+ raise ValueError("argument 'class_index' must be an integer")
+ try:
+ return self._semantic_flag_classes_by_index[class_index]
+ except KeyError:
+ raise AttributeError("argument class_index is not a valid class index")
+
+[docs] def get_semantic_flag_group_index(self, sem_group):
+ if isinstance(sem_group, str):
+ return self._semantic_flag_groups[sem_group]
+ elif isinstance(sem_group, lowlevelil.ILSemanticFlagGroup):
+ return sem_group.index
+ return sem_group
+
+[docs] def get_semantic_flag_group_name(self, group_index):
+ """
+ ``get_semantic_flag_group_name`` gets the name of a semantic flag group from the index.
+
+ :param int group_index: group index
+ :return: the name of the semantic flag group
+ :rtype: str
+ """
+ if not isinstance(group_index, (int, long)):
+ raise ValueError("argument 'group_index' must be an integer")
+ try:
+ return self._semantic_flag_groups_by_index[group_index]
+ except KeyError:
+ raise AttributeError("argument group_index is not a valid group index")
+
+[docs] def get_intrinsic_name(self, intrinsic):
+ """
+ ``get_intrinsic_name`` gets an intrinsic name from an intrinsic number.
+
+ :param int intrinsic: intrinsic number
+ :return: the corresponding intrinsic string
+ :rtype: str
+ """
+ return core.BNGetArchitectureIntrinsicName(self.handle, intrinsic)
+
+[docs] def get_intrinsic_index(self, intrinsic):
+ if isinstance(intrinsic, str):
+ return self._intrinsics[intrinsic]
+ elif isinstance(intrinsic, lowlevelil.ILIntrinsic):
+ return intrinsic.index
+ return intrinsic
+
+[docs] def get_flag_write_type_name(self, write_type):
+ """
+ ``get_flag_write_type_name`` gets the name of the given flag write type.
+
+ :param int write_type: flag write type index
+ :return: flag write type name
+ :rtype: str
+ """
+ return core.BNGetArchitectureFlagWriteTypeName(self.handle, write_type)
+
+[docs] def get_flag_by_name(self, flag):
+ """
+ ``get_flag_by_name`` gets the flag index for the given flag name.
+
+ :param str flag: flag name
+ :return: flag index for the given flag name
+ :rtype: int
+ """
+ return self._flags[flag]
+
+[docs] def get_flag_write_type_by_name(self, write_type):
+ """
+ ``get_flag_write_type_by_name`` gets the flag write type index for the given flag write type name.
+
+ :param str write_type: flag write type name
+ :return: flag write type index
+ :rtype: int
+ """
+ return self._flag_write_types[write_type]
+
+[docs] def get_semantic_flag_class_by_name(self, sem_class):
+ """
+ ``get_semantic_flag_class_by_name`` gets the semantic flag class index by name.
+
+ :param str sem_class: semantic flag class name
+ :return: semantic flag class index
+ :rtype: int
+ """
+ return self._semantic_flag_classes[sem_class]
+
+[docs] def get_semantic_flag_group_by_name(self, sem_group):
+ """
+ ``get_semantic_flag_group_by_name`` gets the semantic flag group index by name.
+
+ :param str sem_group: semantic flag group name
+ :return: semantic flag group index
+ :rtype: int
+ """
+ return self._semantic_flag_groups[sem_group]
+
+[docs] def get_flag_role(self, flag, sem_class = None):
+ """
+ ``get_flag_role`` gets the role of a given flag.
+
+ :param int flag: flag
+ :param int sem_class: optional semantic flag class
+ :return: flag role
+ :rtype: FlagRole
+ """
+ return self.perform_get_flag_role(flag, sem_class)
+
+[docs] def get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il):
+ """
+ :param LowLevelILOperation op:
+ :param int size:
+ :param str write_type:
+ :param int flag: the flag being written
+ :param list(str or int) operands: a list of items that are either string register names or constant \
+ integer values
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ return self.perform_get_flag_write_low_level_il(op, size, write_type, flag, operands, il)
+
+[docs] def get_default_flag_write_low_level_il(self, op, size, role, operands, il):
+ """
+ :param LowLevelILOperation op:
+ :param int size:
+ :param FlagRole role:
+ :param list(str or int) operands: a list of items that are either string register names or constant \
+ integer values
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ operand_list = (core.BNRegisterOrConstant * len(operands))()
+ for i in range(len(operands)):
+ if isinstance(operands[i], str):
+ operand_list[i].constant = False
+ operand_list[i].reg = self.regs[operands[i]].index
+ elif isinstance(operands[i], lowlevelil.ILRegister):
+ operand_list[i].constant = False
+ operand_list[i].reg = operands[i].index
+ else:
+ operand_list[i].constant = True
+ operand_list[i].value = operands[i]
+ return lowlevelil.LowLevelILExpr(core.BNGetDefaultArchitectureFlagWriteLowLevelIL(self.handle, op, size,
+ role, operand_list, len(operand_list), il.handle))
+
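+ # Hedged sketch: a subclass can usually satisfy ``get_flag_write_low_level_il``
+ # by mapping the flag to its role and deferring to the role-based defaults,
+ # much like the deprecated ``perform_`` variant above. Flags whose semantics
+ # a role cannot express would be special-cased before the fallback.
+ def get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il):
+     role = self.get_flag_role(self.get_flag_index(flag))
+     return self.get_default_flag_write_low_level_il(op, size, role, operands, il)
+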
+[docs] def get_flag_condition_low_level_il(self, cond, sem_class, il):
+ """
+ :param LowLevelILFlagCondition cond: Flag condition to be computed
+ :param str sem_class: Semantic class to be used (None for default semantics)
+ :param LowLevelILFunction il: LowLevelILFunction object to append LowLevelILExpr objects to
+ :rtype: LowLevelILExpr
+ """
+ return self.perform_get_flag_condition_low_level_il(cond, sem_class, il)
+
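+ # Hedged sketch of a ``get_flag_condition_low_level_il`` override that
+ # computes the "equal" condition directly from a hypothetical 'z' flag and
+ # defers every other condition to the role-based default. ``il.flag`` is the
+ # standard LowLevelILFunction flag-read builder.
+ def get_flag_condition_low_level_il(self, cond, sem_class, il):
+     if cond == LowLevelILFlagCondition.LLFC_E:
+         return il.flag('z')
+     return self.get_default_flag_condition_low_level_il(cond, sem_class, il)
+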
+[docs] def get_default_flag_condition_low_level_il(self, cond, sem_class, il):
+ """
+ :param LowLevelILFlagCondition cond:
+ :param str sem_class:
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ class_index = self.get_semantic_flag_class_index(sem_class)
+ return lowlevelil.LowLevelILExpr(core.BNGetDefaultArchitectureFlagConditionLowLevelIL(self.handle, cond, class_index, il.handle))
+
+[docs] def get_semantic_flag_group_low_level_il(self, sem_group, il):
+ """
+ :param str sem_group:
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ return self.perform_get_semantic_flag_group_low_level_il(sem_group, il)
+
+[docs] def get_flags_required_for_flag_condition(self, cond, sem_class = None):
+ return self.perform_get_flags_required_for_flag_condition(cond, sem_class)
+
+[docs] def get_modified_regs_on_write(self, reg):
+ """
+ ``get_modified_regs_on_write`` returns a list of register names that are modified when ``reg`` is written.
+
+ :param str reg: string register name
+ :return: list of register names
+ :rtype: list(str)
+ """
+ reg = core.BNGetArchitectureRegisterByName(self.handle, str(reg))
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetModifiedArchitectureRegistersOnWrite(self.handle, reg, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(core.BNGetArchitectureRegisterName(self.handle, regs[i]))
+ core.BNFreeRegisterList(regs)
+ return result
+
+[docs] def assemble(self, code, addr=0):
+ """
+ ``assemble`` converts the string of assembly instructions ``code`` loaded at virtual address ``addr`` to the
+ byte representation of those instructions.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ Architecture plugins can override this method to provide assembler functionality. This can be done by
+ simply shelling out to an assembler like yasm or llvm-mc, since this method isn't performance sensitive.
+
+ .. note :: It is important that the assembler used accepts a syntax identical to the one emitted by the \
+ disassembler. This will prevent confusing the user.
+
+ :param str code: string representation of the instructions to be assembled
+ :param int addr: virtual address that the instructions will be loaded at
+ :return: the bytes for the assembled instructions
+ :rtype: Python3 - a 'bytes' object; Python2 - a 'str' object
+ :Example:
+
+ >>> arch.assemble("je 10")
+ '\\x0f\\x84\\x04\\x00\\x00\\x00'
+ >>>
+ """
+ return self.perform_assemble(code, addr)
+
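+ # Hedged sketch of an ``assemble`` override that shells out to an external
+ # assembler, as suggested above. The ``my-as`` command and its flags are
+ # placeholders; substitute an assembler that emits a flat binary for your
+ # architecture (for example llvm-mc with the appropriate triple). Depending
+ # on the API generation targeted, failures may instead be reported by
+ # returning a (None, error_string) tuple from ``perform_assemble``.
+ import os
+ import subprocess
+ import tempfile
+
+ def assemble(self, code, addr=0):
+     out_fd, out_path = tempfile.mkstemp(suffix='.bin')
+     os.close(out_fd)
+     try:
+         proc = subprocess.Popen(['my-as', '--base', hex(addr), '-o', out_path],
+             stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+         _, err = proc.communicate(code.encode())
+         if proc.returncode != 0:
+             raise ValueError('Could not assemble: %s' % err.decode())
+         with open(out_path, 'rb') as f:
+             return f.read()
+     finally:
+         os.remove(out_path)
+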
+[docs] def is_never_branch_patch_available(self, data, addr):
+ """
+ ``is_never_branch_patch_available`` determines if the instruction ``data`` at ``addr`` can be made to **never branch**.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_never_branch_patch_available(arch.assemble("je 10")[0], 0)
+ True
+ >>> arch.is_never_branch_patch_available(arch.assemble("nop")[0], 0)
+ False
+ >>>
+ """
+ return self.perform_is_never_branch_patch_available(data, addr)
+
+[docs] def is_always_branch_patch_available(self, data, addr):
+ """
+ ``is_always_branch_patch_available`` determines if the instruction ``data`` at ``addr`` can be made to
+ **always branch**.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_always_branch_patch_available(arch.assemble("je 10")[0], 0)
+ True
+ >>> arch.is_always_branch_patch_available(arch.assemble("nop")[0], 0)
+ False
+ >>>
+ """
+ return self.perform_is_always_branch_patch_available(data, addr)
+
+[docs] def is_invert_branch_patch_available(self, data, addr):
+ """
+ ``is_invert_branch_patch_available`` determines if the instruction ``data`` at ``addr`` can be inverted.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_invert_branch_patch_available(arch.assemble("je 10")[0], 0)
+ True
+ >>> arch.is_invert_branch_patch_available(arch.assemble("nop")[0], 0)
+ False
+ >>>
+ """
+ return self.perform_is_invert_branch_patch_available(data, addr)
+
+[docs] def is_skip_and_return_zero_patch_available(self, data, addr):
+ """
+ ``is_skip_and_return_zero_patch_available`` determines if the instruction ``data`` at ``addr`` is a *call-like*
+ instruction that can be made into an instruction that *returns zero*.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_skip_and_return_zero_patch_available(arch.assemble("call 0")[0], 0)
+ True
+ >>> arch.is_skip_and_return_zero_patch_available(arch.assemble("call eax")[0], 0)
+ True
+ >>> arch.is_skip_and_return_zero_patch_available(arch.assemble("jmp eax")[0], 0)
+ False
+ >>>
+ """
+ return self.perform_is_skip_and_return_zero_patch_available(data, addr)
+
+[docs] def is_skip_and_return_value_patch_available(self, data, addr):
+ """
+ ``is_skip_and_return_value_patch_available`` determines if the instruction ``data`` at ``addr`` is a *call-like*
+ instruction that can be made into an instruction that *returns a value*.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_skip_and_return_value_patch_available(arch.assemble("call 0")[0], 0)
+ True
+ >>> arch.is_skip_and_return_value_patch_available(arch.assemble("jmp eax")[0], 0)
+ False
+ >>>
+ """
+ return self.perform_is_skip_and_return_value_patch_available(data, addr)
+
+[docs] def convert_to_nop(self, data, addr):
+ """
+ ``convert_to_nop`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of nop
+ instructions of the same length as data.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :return: string containing len(data) worth of no-operation instructions
+ :rtype: str
+ :Example:
+
+ >>> arch.convert_to_nop("\\x00\\x00", 0)
+ '\\x90\\x90'
+ >>>
+ """
+ return self.perform_convert_to_nop(data, addr)
+
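+ # Hedged sketch of a ``convert_to_nop`` override for an ISA with a
+ # single-byte nop; 0x90 is borrowed from x86 purely for illustration. A
+ # fixed-width ISA would instead verify len(data) is a multiple of the
+ # instruction size and repeat its wider nop encoding.
+ def convert_to_nop(self, data, addr):
+     return b'\x90' * len(data)
+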
+[docs] def always_branch(self, data, addr):
+ """
+ ``always_branch`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of bytes
+ of the same length which always branches.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :return: string containing len(data) bytes which always branches to the same location as the provided instruction
+ :rtype: str
+ :Example:
+
+ >>> bytes = arch.always_branch(arch.assemble("je 10")[0], 0)
+ >>> arch.get_instruction_text(bytes, 0)
+ (['nop '], 1L)
+ >>> arch.get_instruction_text(bytes[1:], 0)
+ (['jmp ', '0x9'], 5L)
+ >>>
+ """
+ return self.perform_always_branch(data, addr)
+
+[docs] def invert_branch(self, data, addr):
+ """
+ ``invert_branch`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of bytes
+ of the same length which inverts the branch of the provided instruction.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :return: string containing len(data) bytes which inverts the branch condition of the provided instruction
+ :rtype: str
+ :Example:
+
+ >>> arch.get_instruction_text(arch.invert_branch(arch.assemble("je 10")[0], 0), 0)
+ (['jne ', '0xa'], 6L)
+ >>> arch.get_instruction_text(arch.invert_branch(arch.assemble("jo 10")[0], 0), 0)
+ (['jno ', '0xa'], 6L)
+ >>> arch.get_instruction_text(arch.invert_branch(arch.assemble("jge 10")[0], 0), 0)
+ (['jl ', '0xa'], 6L)
+ >>>
+ """
+ return self.perform_invert_branch(data, addr)
+
+[docs] def skip_and_return_value(self, data, addr, value):
+ """
+ ``skip_and_return_value`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of
+ bytes of the same length which doesn't call and instead *returns a value*.
+
+ .. note:: Architecture subclasses should implement this method.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :param int value: value to be returned
+ :return: string containing len(data) bytes which returns ``value`` instead of calling
+ :rtype: str
+ :Example:
+
+ >>> arch.get_instruction_text(arch.skip_and_return_value(arch.assemble("call 10")[0], 0, 0), 0)
+ (['mov ', 'eax', ', ', '0x0'], 5L)
+ >>>
+ """
+ return self.perform_skip_and_return_value(data, addr, value)
+
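+ # Hedged sketch of a ``skip_and_return_value`` override matching the x86
+ # flavour of the example above: replace the call with ``mov eax, value``
+ # (0xB8 followed by a little-endian imm32, five bytes) and pad the rest of
+ # the patched range with single-byte nops. The encodings are assumptions for
+ # illustration.
+ import struct
+
+ def skip_and_return_value(self, data, addr, value):
+     patch = b'\xb8' + struct.pack('<I', value & 0xffffffff)
+     if len(data) < len(patch):
+         return None
+     return patch + b'\x90' * (len(data) - len(patch))
+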
+[docs] def is_view_type_constant_defined(self, type_name, const_name):
+ """
+ ``is_view_type_constant_defined`` determines whether the given BinaryView type constant is defined for this architecture.
+
+ :param str type_name: the BinaryView type name of the constant to query
+ :param str const_name: the constant name to query
+ :return: True if the constant is defined, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> ELF_RELOC_COPY = 5
+ >>> arch.set_view_type_constant("ELF", "R_COPY", ELF_RELOC_COPY)
+ >>> arch.is_view_type_constant_defined("ELF", "R_COPY")
+ True
+ >>> arch.is_view_type_constant_defined("ELF", "NOT_THERE")
+ False
+ >>>
+ """
+ return core.BNIsBinaryViewTypeArchitectureConstantDefined(self.handle, type_name, const_name)
+
+[docs] def get_view_type_constant(self, type_name, const_name, default_value=0):
+ """
+ ``get_view_type_constant`` retrieves the view type constant for the given type_name and const_name.
+
+ :param str type_name: the BinaryView type name of the constant to be retrieved
+ :param str const_name: the constant name to retrieved
+ :param int default_value: optional default value if the type_name is not present. default value is zero.
+ :return: The BinaryView type constant or the default_value if not found
+ :rtype: int
+ :Example:
+
+ >>> ELF_RELOC_COPY = 5
+ >>> arch.set_view_type_constant("ELF", "R_COPY", ELF_RELOC_COPY)
+ >>> arch.get_view_type_constant("ELF", "R_COPY")
+ 5L
+ >>> arch.get_view_type_constant("ELF", "NOT_HERE", 100)
+ 100L
+ """
+ return core.BNGetBinaryViewTypeArchitectureConstant(self.handle, type_name, const_name, default_value)
+
+[docs] def set_view_type_constant(self, type_name, const_name, value):
+ """
+ ``set_view_type_constant`` creates a new binaryview type constant.
+
+ :param str type_name: the BinaryView type name of the constant to be registered
+ :param str const_name: the constant name to register
+ :param int value: the value of the constant
+ :rtype: None
+ :Example:
+
+ >>> ELF_RELOC_COPY = 5
+ >>> arch.set_view_type_constant("ELF", "R_COPY", ELF_RELOC_COPY)
+ >>>
+ """
+ core.BNSetBinaryViewTypeArchitectureConstant(self.handle, type_name, const_name, value)
+
+[docs] def register_calling_convention(self, cc):
+ """
+ ``register_calling_convention`` registers a new calling convention for the Architecture.
+
+ :param CallingConvention cc: CallingConvention object to be registered
+ :rtype: None
+ """
+ core.BNRegisterCallingConvention(self.handle, cc.handle)
+
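+ # Hedged sketch of registering a calling convention. The CallingConvention
+ # attribute names and constructor follow binaryninja.callingconvention as I
+ # understand it, and the register names assume the stock 'x86' architecture;
+ # treat both as assumptions to check against the CallingConvention docs.
+ class HypotheticalFastcall(binaryninja.CallingConvention):
+     name = 'hypothetical-fastcall'
+     int_arg_regs = ['ecx', 'edx']
+     int_return_reg = 'eax'
+
+ x86 = Architecture['x86']
+ x86.register_calling_convention(HypotheticalFastcall(x86, 'hypothetical-fastcall'))
+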
+
+_architecture_cache = {}
+[docs]class CoreArchitecture(Architecture):
+[docs] def __init__(self, handle):
+ super(CoreArchitecture, self).__init__()
+
+ self.handle = core.handle_of_type(handle, core.BNArchitecture)
+ self.__dict__["name"] = core.BNGetArchitectureName(self.handle)
+ self.__dict__["endianness"] = Endianness(core.BNGetArchitectureEndianness(self.handle))
+ self.__dict__["address_size"] = core.BNGetArchitectureAddressSize(self.handle)
+ self.__dict__["default_int_size"] = core.BNGetArchitectureDefaultIntegerSize(self.handle)
+ self.__dict__["instr_alignment"] = core.BNGetArchitectureInstructionAlignment(self.handle)
+ self.__dict__["max_instr_length"] = core.BNGetArchitectureMaxInstructionLength(self.handle)
+ self.__dict__["opcode_display_length"] = core.BNGetArchitectureOpcodeDisplayLength(self.handle)
+ self.__dict__["stack_pointer"] = core.BNGetArchitectureRegisterName(self.handle,
+ core.BNGetArchitectureStackPointerRegister(self.handle))
+
+ link_reg = core.BNGetArchitectureLinkRegister(self.handle)
+ if link_reg == 0xffffffff:
+ self.__dict__["link_reg"] = None
+ else:
+ self.__dict__["link_reg"] = core.BNGetArchitectureRegisterName(self.handle, link_reg)
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetAllArchitectureRegisters(self.handle, count)
+ self._all_regs = {}
+ self._regs_by_index = {}
+ self._full_width_regs = {}
+ self.__dict__["regs"] = {}
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureRegisterName(self.handle, regs[i])
+ info = core.BNGetArchitectureRegisterInfo(self.handle, regs[i])
+ full_width_reg = core.BNGetArchitectureRegisterName(self.handle, info.fullWidthRegister)
+ self.regs[name] = binaryninja.function.RegisterInfo(full_width_reg, info.size, info.offset,
+ ImplicitRegisterExtend(info.extend), regs[i])
+ self._all_regs[name] = regs[i]
+ self._regs_by_index[regs[i]] = name
+ for i in range(0, count.value):
+ info = core.BNGetArchitectureRegisterInfo(self.handle, regs[i])
+ full_width_reg = core.BNGetArchitectureRegisterName(self.handle, info.fullWidthRegister)
+ if full_width_reg not in self._full_width_regs:
+ self._full_width_regs[full_width_reg] = self._all_regs[full_width_reg]
+ core.BNFreeRegisterList(regs)
+
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetAllArchitectureFlags(self.handle, count)
+ self._flags = {}
+ self._flags_by_index = {}
+ self.__dict__["flags"] = []
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureFlagName(self.handle, flags[i])
+ self._flags[name] = flags[i]
+ self._flags_by_index[flags[i]] = name
+ self.flags.append(name)
+ core.BNFreeRegisterList(flags)
+
+ count = ctypes.c_ulonglong()
+ write_types = core.BNGetAllArchitectureFlagWriteTypes(self.handle, count)
+ self._flag_write_types = {}
+ self._flag_write_types_by_index = {}
+ self.__dict__["flag_write_types"] = []
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureFlagWriteTypeName(self.handle, write_types[i])
+ self._flag_write_types[name] = write_types[i]
+ self._flag_write_types_by_index[write_types[i]] = name
+ self.flag_write_types.append(name)
+ core.BNFreeRegisterList(write_types)
+
+ count = ctypes.c_ulonglong()
+ sem_classes = core.BNGetAllArchitectureSemanticFlagClasses(self.handle, count)
+ self._semantic_flag_classes = {}
+ self._semantic_flag_classes_by_index = {}
+ self.__dict__["semantic_flag_classes"] = []
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureSemanticFlagClassName(self.handle, sem_classes[i])
+ self._semantic_flag_classes[name] = sem_classes[i]
+ self._semantic_flag_classes_by_index[sem_classes[i]] = name
+ self.semantic_flag_classes.append(name)
+ core.BNFreeRegisterList(sem_classes)
+
+ count = ctypes.c_ulonglong()
+ sem_groups = core.BNGetAllArchitectureSemanticFlagGroups(self.handle, count)
+ self._semantic_flag_groups = {}
+ self._semantic_flag_groups_by_index = {}
+ self.__dict__["semantic_flag_groups"] = []
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureSemanticFlagGroupName(self.handle, sem_groups[i])
+ self._semantic_flag_groups[name] = sem_groups[i]
+ self._semantic_flag_groups_by_index[sem_groups[i]] = name
+ self.semantic_flag_groups.append(name)
+ core.BNFreeRegisterList(sem_groups)
+
+ self._flag_roles = {}
+ self.__dict__["flag_roles"] = {}
+ for flag in self.__dict__["flags"]:
+ role = FlagRole(core.BNGetArchitectureFlagRole(self.handle, self._flags[flag], 0))
+ self.__dict__["flag_roles"][flag] = role
+ self._flag_roles[self._flags[flag]] = role
+
+ self.__dict__["flags_required_for_flag_condition"] = {}
+ for cond in LowLevelILFlagCondition:
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetArchitectureFlagsRequiredForFlagCondition(self.handle, cond, 0, count)
+ flag_names = []
+ for i in range(0, count.value):
+ flag_names.append(self._flags_by_index[flags[i]])
+ core.BNFreeRegisterList(flags)
+ self.__dict__["flags_required_for_flag_condition"][cond] = flag_names
+
+ self._flags_required_by_semantic_flag_group = {}
+ self.__dict__["flags_required_for_semantic_flag_group"] = {}
+ for group in self.semantic_flag_groups:
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetArchitectureFlagsRequiredForSemanticFlagGroup(self.handle,
+ self._semantic_flag_groups[group], count)
+ flag_indexes = []
+ flag_names = []
+ for i in range(0, count.value):
+ flag_indexes.append(flags[i])
+ flag_names.append(self._flags_by_index[flags[i]])
+ core.BNFreeRegisterList(flags)
+ self._flags_required_by_semantic_flag_group[self._semantic_flag_groups[group]] = flag_indexes
+ self.__dict__["flags_required_for_semantic_flag_group"][group] = flag_names
+
+ self._flag_conditions_for_semantic_flag_group = {}
+ self.__dict__["flag_conditions_for_semantic_flag_group"] = {}
+ for group in self.semantic_flag_groups:
+ count = ctypes.c_ulonglong()
+ conditions = core.BNGetArchitectureFlagConditionsForSemanticFlagGroup(self.handle,
+ self._semantic_flag_groups[group], count)
+ class_index_cond = {}
+ class_cond = {}
+ for i in range(0, count.value):
+ class_index_cond[conditions[i].semanticClass] = conditions[i].condition
+ if conditions[i].semanticClass == 0:
+ class_cond[None] = conditions[i].condition
+ elif conditions[i].semanticClass in self._semantic_flag_classes_by_index:
+ class_cond[self._semantic_flag_classes_by_index[conditions[i].semanticClass]] = conditions[i].condition
+ core.BNFreeFlagConditionsForSemanticFlagGroup(conditions)
+ self._flag_conditions_for_semantic_flag_group[self._semantic_flag_groups[group]] = class_index_cond
+ self.__dict__["flag_conditions_for_semantic_flag_group"][group] = class_cond
+
+ self._flags_written_by_flag_write_type = {}
+ self.__dict__["flags_written_by_flag_write_type"] = {}
+ for write_type in self.flag_write_types:
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetArchitectureFlagsWrittenByFlagWriteType(self.handle,
+ self._flag_write_types[write_type], count)
+ flag_indexes = []
+ flag_names = []
+ for i in range(0, count.value):
+ flag_indexes.append(flags[i])
+ flag_names.append(self._flags_by_index[flags[i]])
+ core.BNFreeRegisterList(flags)
+ self._flags_written_by_flag_write_type[self._flag_write_types[write_type]] = flag_indexes
+ self.__dict__["flags_written_by_flag_write_type"][write_type] = flag_names
+
+ self._semantic_class_for_flag_write_type = {}
+ self.__dict__["semantic_class_for_flag_write_type"] = {}
+ for write_type in self.flag_write_types:
+ sem_class = core.BNGetArchitectureSemanticClassForFlagWriteType(self.handle,
+ self._flag_write_types[write_type])
+ if sem_class == 0:
+ sem_class_name = None
+ else:
+ sem_class_name = self._semantic_flag_classes_by_index[sem_class]
+ self._semantic_class_for_flag_write_type[self._flag_write_types[write_type]] = sem_class
+ self.__dict__["semantic_class_for_flag_write_type"][write_type] = sem_class_name
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetArchitectureGlobalRegisters(self.handle, count)
+ self.__dict__["global_regs"] = []
+ for i in range(0, count.value):
+ self.global_regs.append(core.BNGetArchitectureRegisterName(self.handle, regs[i]))
+ core.BNFreeRegisterList(regs)
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetAllArchitectureRegisterStacks(self.handle, count)
+ self._all_reg_stacks = {}
+ self._reg_stacks_by_index = {}
+ self.__dict__["reg_stacks"] = {}
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureRegisterStackName(self.handle, regs[i])
+ info = core.BNGetArchitectureRegisterStackInfo(self.handle, regs[i])
+ storage = []
+ for j in range(0, info.storageCount):
+ storage.append(core.BNGetArchitectureRegisterName(self.handle, info.firstStorageReg + j))
+ top_rel = []
+ for j in range(0, info.topRelativeCount):
+ top_rel.append(core.BNGetArchitectureRegisterName(self.handle, info.firstTopRelativeReg + j))
+ top = core.BNGetArchitectureRegisterName(self.handle, info.stackTopReg)
+ self.reg_stacks[name] = binaryninja.function.RegisterStackInfo(storage, top_rel, top, regs[i])
+ self._all_reg_stacks[name] = regs[i]
+ self._reg_stacks_by_index[regs[i]] = name
+ core.BNFreeRegisterList(regs)
+
+ count = ctypes.c_ulonglong()
+ intrinsics = core.BNGetAllArchitectureIntrinsics(self.handle, count)
+ self._intrinsics = {}
+ self._intrinsics_by_index = {}
+ self.__dict__["intrinsics"] = {}
+ for i in range(0, count.value):
+ name = core.BNGetArchitectureIntrinsicName(self.handle, intrinsics[i])
+ input_count = ctypes.c_ulonglong()
+ inputs = core.BNGetArchitectureIntrinsicInputs(self.handle, intrinsics[i], input_count)
+ input_list = []
+ for j in range(0, input_count.value):
+ input_name = inputs[j].name
+ type_obj = types.Type(core.BNNewTypeReference(inputs[j].type), confidence = inputs[j].typeConfidence)
+ input_list.append(binaryninja.function.IntrinsicInput(type_obj, input_name))
+ core.BNFreeNameAndTypeList(inputs, input_count.value)
+ output_count = ctypes.c_ulonglong()
+ outputs = core.BNGetArchitectureIntrinsicOutputs(self.handle, intrinsics[i], output_count)
+ output_list = []
+ for j in range(0, output_count.value):
+ output_list.append(types.Type(core.BNNewTypeReference(outputs[j].type), confidence = outputs[j].confidence))
+ core.BNFreeOutputTypeList(outputs, output_count.value)
+ self.intrinsics[name] = binaryninja.function.IntrinsicInfo(input_list, output_list)
+ self._intrinsics[name] = intrinsics[i]
+ self._intrinsics_by_index[intrinsics[i]] = (name, self.intrinsics[name])
+ core.BNFreeRegisterList(intrinsics)
+ if type(self) is CoreArchitecture:
+ global _architecture_cache
+ _architecture_cache[ctypes.addressof(handle.contents)] = self
+
+ @classmethod
+ def _from_cache(cls, handle):
+ global _architecture_cache
+ return _architecture_cache.get(ctypes.addressof(handle.contents)) or cls(handle)
+
+[docs] def get_associated_arch_by_address(self, addr):
+ new_addr = ctypes.c_ulonglong()
+ new_addr.value = addr
+ result = core.BNGetAssociatedArchitectureByAddress(self.handle, new_addr)
+ return CoreArchitecture._from_cache(handle = result), new_addr.value
+
+[docs] def get_instruction_info(self, data, addr):
+ """
+ ``get_instruction_info`` returns an InstructionInfo object for the instruction at the given virtual address
+ ``addr`` with data ``data``.
+
+ .. note :: The instruction info object should always set the InstructionInfo.length to the instruction length, \
+ and the branches of the proper types should be added if the instruction is a branch.
+
+ :param str data: max_instruction_length bytes from the binary at virtual address ``addr``
+ :param int addr: virtual address of bytes in ``data``
+ :return: the InstructionInfo for the current instruction
+ :rtype: InstructionInfo
+ """
+ info = core.BNInstructionInfo()
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ if not core.BNGetInstructionInfo(self.handle, buf, addr, len(data), info):
+ return None
+ result = binaryninja.function.InstructionInfo()
+ result.length = info.length
+ result.arch_transition_by_target_addr = info.archTransitionByTargetAddr
+ result.branch_delay = info.branchDelay
+ for i in range(0, info.branchCount):
+ target = info.branchTarget[i]
+ if info.branchArch[i]:
+ arch = CoreArchitecture._from_cache(info.branchArch[i])
+ else:
+ arch = None
+ result.add_branch(BranchType(info.branchType[i]), target, arch)
+ return result
+
+[docs] def get_instruction_text(self, data, addr):
+ """
+ ``get_instruction_text`` returns a list of InstructionTextToken objects for the instruction at the given virtual
+ address ``addr`` with data ``data``.
+
+ :param str data: max_instruction_length bytes from the binary at virtual address ``addr``
+ :param int addr: virtual address of bytes in ``data``
+ :return: an InstructionTextToken list for the current instruction
+ :rtype: list(InstructionTextToken)
+ """
+ if not isinstance(data, bytes):
+ if isinstance(data, str):
+ data=data.encode()
+ else:
+ raise TypeError("Must be bytes or str")
+ count = ctypes.c_ulonglong()
+ length = ctypes.c_ulonglong()
+ length.value = len(data)
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ tokens = ctypes.POINTER(core.BNInstructionTextToken)()
+ if not core.BNGetInstructionText(self.handle, buf, addr, length, tokens, count):
+ return None, 0
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result, length.value
+
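+ # Hedged usage sketch, assuming ``arch`` is the CoreArchitecture of an x86
+ # view (for example ``bv.arch``), so that 0x90 really is a one-byte nop.
+ tokens, length = arch.get_instruction_text(b'\x90', 0)
+ if tokens is not None:
+     print(''.join(str(token) for token in tokens), length)  # "nop " 1
+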
+[docs] def get_instruction_low_level_il(self, data, addr, il):
+ """
+ ``get_instruction_low_level_il`` appends LowLevelILExpr objects to ``il`` for the instruction at the given
+ virtual address ``addr`` with data ``data``.
+
+ This is used to analyze arbitrary data at an address, if you are working with an existing binary, you likely
+ want to be using ``Function.get_low_level_il_at``.
+
+ :param str data: max_instruction_length bytes from the binary at virtual address ``addr``
+ :param int addr: virtual address of bytes in ``data``
+ :param LowLevelILFunction il: The function the current instruction belongs to
+ :return: the length of the current instruction
+ :rtype: int
+ """
+ length = ctypes.c_ulonglong()
+ length.value = len(data)
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ core.BNGetInstructionLowLevelIL(self.handle, buf, addr, length, il.handle)
+ return length.value
+
+[docs] def get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il):
+ """
+ :param LowLevelILOperation op:
+ :param int size:
+ :param str write_type:
+ :param int flag: the flag being written
+ :param list(str or int) operands: a list of items that are either string register names or constant \
+ integer values
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ flag = self.get_flag_index(flag)
+ operand_list = (core.BNRegisterOrConstant * len(operands))()
+ for i in range(len(operands)):
+ if isinstance(operands[i], str):
+ operand_list[i].constant = False
+ operand_list[i].reg = self.regs[operands[i]].index
+ elif isinstance(operands[i], lowlevelil.ILRegister):
+ operand_list[i].constant = False
+ operand_list[i].reg = operands[i].index
+ else:
+ operand_list[i].constant = True
+ operand_list[i].value = operands[i]
+ return lowlevelil.LowLevelILExpr(core.BNGetArchitectureFlagWriteLowLevelIL(self.handle, op, size,
+ self._flag_write_types[write_type], flag, operand_list, len(operand_list), il.handle))
+
+[docs] def get_flag_condition_low_level_il(self, cond, sem_class, il):
+ """
+ :param LowLevelILFlagCondition cond: Flag condition to be computed
+ :param str sem_class: Semantic class to be used (None for default semantics)
+ :param LowLevelILFunction il: LowLevelILFunction object to append LowLevelILExpr objects to
+ :rtype: LowLevelILExpr
+ """
+ class_index = self.get_semantic_flag_class_index(sem_class)
+ return lowlevelil.LowLevelILExpr(core.BNGetArchitectureFlagConditionLowLevelIL(self.handle, cond,
+ class_index, il.handle))
+
+[docs] def get_semantic_flag_group_low_level_il(self, sem_group, il):
+ """
+ :param str sem_group:
+ :param LowLevelILFunction il:
+ :rtype: LowLevelILExpr
+ """
+ group_index = self.get_semantic_flag_group_index(sem_group)
+ return lowlevelil.LowLevelILExpr(core.BNGetArchitectureSemanticFlagGroupLowLevelIL(self.handle, group_index, il.handle))
+
+[docs] def assemble(self, code, addr=0):
+ """
+ ``assemble`` converts the string of assembly instructions ``code`` loaded at virtual address ``addr`` to the
+ byte representation of those instructions.
+
+ :param str code: string representation of the instructions to be assembled
+ :param int addr: virtual address that the instructions will be loaded at
+ :return: the bytes for the assembled instructions
+ :rtype: Python3 - a 'bytes' object; Python2 - a 'str' object
+ :Example:
+
+ >>> arch.assemble("je 10")
+ '\\x0f\\x84\\x04\\x00\\x00\\x00'
+ >>>
+ """
+ result = databuffer.DataBuffer()
+ errors = ctypes.c_char_p()
+ if not core.BNAssemble(self.handle, code, addr, result.handle, errors):
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise ValueError("Could not assemble: %s" % error_str)
+ if isinstance(str(result), bytes):
+ return str(result)
+ else:
+ return bytes(result)
+
+[docs] def is_never_branch_patch_available(self, data, addr):
+ """
+ ``is_never_branch_patch_available`` determines if the instruction ``data`` at ``addr`` can be made to **never branch**.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_never_branch_patch_available(arch.assemble("je 10")[0], 0)
+ True
+ >>> arch.is_never_branch_patch_available(arch.assemble("nop")[0], 0)
+ False
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ return core.BNIsArchitectureNeverBranchPatchAvailable(self.handle, buf, addr, len(data))
+
+[docs] def is_always_branch_patch_available(self, data, addr):
+ """
+ ``is_always_branch_patch_available`` determines if the instruction ``data`` at ``addr`` can be made to
+ **always branch**.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_always_branch_patch_available(arch.assemble("je 10")[0], 0)
+ True
+ >>> arch.is_always_branch_patch_available(arch.assemble("nop")[0], 0)
+ False
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ return core.BNIsArchitectureAlwaysBranchPatchAvailable(self.handle, buf, addr, len(data))
+
+[docs] def is_invert_branch_patch_available(self, data, addr):
+ """
+ ``is_invert_branch_patch_available`` determines if the instruction ``data`` at ``addr`` can be inverted.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_invert_branch_patch_available(arch.assemble("je 10")[0], 0)
+ True
+ >>> arch.is_invert_branch_patch_available(arch.assemble("nop")[0], 0)
+ False
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ return core.BNIsArchitectureInvertBranchPatchAvailable(self.handle, buf, addr, len(data))
+
+[docs] def is_skip_and_return_zero_patch_available(self, data, addr):
+ """
+ ``is_skip_and_return_zero_patch_available`` determines if the instruction ``data`` at ``addr`` is a *call-like*
+ instruction that can be made into an instruction that *returns zero*.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_skip_and_return_zero_patch_available(arch.assemble("call 0")[0], 0)
+ True
+ >>> arch.is_skip_and_return_zero_patch_available(arch.assemble("call eax")[0], 0)
+ True
+ >>> arch.is_skip_and_return_zero_patch_available(arch.assemble("jmp eax")[0], 0)
+ False
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ return core.BNIsArchitectureSkipAndReturnZeroPatchAvailable(self.handle, buf, addr, len(data))
+
+[docs] def is_skip_and_return_value_patch_available(self, data, addr):
+ """
+ ``is_skip_and_return_value_patch_available`` determines if the instruction ``data`` at ``addr`` is a *call-like*
+ instruction that can be made into an instruction that *returns a value*.
+
+ :param str data: bytes for the instruction to be checked
+ :param int addr: the virtual address of the instruction to be patched
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> arch.is_skip_and_return_value_patch_available(arch.assemble("call 0")[0], 0)
+ True
+ >>> arch.is_skip_and_return_value_patch_available(arch.assemble("jmp eax")[0], 0)
+ False
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ return core.BNIsArchitectureSkipAndReturnValuePatchAvailable(self.handle, buf, addr, len(data))
+
+[docs] def convert_to_nop(self, data, addr):
+ """
+ ``convert_to_nop`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of nop
+ instructions of the same length as data.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :return: string containing len(data) worth of no-operation instructions
+ :rtype: str
+ :Example:
+
+ >>> arch.convert_to_nop("\\x00\\x00", 0)
+ '\\x90\\x90'
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ if not core.BNArchitectureConvertToNop(self.handle, buf, addr, len(data)):
+ return None
+ result = ctypes.create_string_buffer(len(data))
+ ctypes.memmove(result, buf, len(data))
+ return result.raw
+
+[docs] def always_branch(self, data, addr):
+ """
+ ``always_branch`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of bytes
+ of the same length which always branches.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :return: string containing len(data) bytes which always branches to the same location as the provided instruction
+ :rtype: str
+ :Example:
+
+ >>> bytes = arch.always_branch(arch.assemble("je 10")[0], 0)
+ >>> arch.get_instruction_text(bytes, 0)
+ (['nop '], 1L)
+ >>> arch.get_instruction_text(bytes[1:], 0)
+ (['jmp ', '0x9'], 5L)
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ if not core.BNArchitectureAlwaysBranch(self.handle, buf, addr, len(data)):
+ return None
+ result = ctypes.create_string_buffer(len(data))
+ ctypes.memmove(result, buf, len(data))
+ return result.raw
+
+[docs] def invert_branch(self, data, addr):
+ """
+ ``invert_branch`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of bytes
+ of the same length which inverts the branch of the provided instruction.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :return: string containing len(data) bytes which inverts the branch condition of the provided instruction
+ :rtype: str
+ :Example:
+
+ >>> arch.get_instruction_text(arch.invert_branch(arch.assemble("je 10")[0], 0), 0)
+ (['jne ', '0xa'], 6L)
+ >>> arch.get_instruction_text(arch.invert_branch(arch.assemble("jo 10")[0], 0), 0)
+ (['jno ', '0xa'], 6L)
+ >>> arch.get_instruction_text(arch.invert_branch(arch.assemble("jge 10")[0], 0), 0)
+ (['jl ', '0xa'], 6L)
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ if not core.BNArchitectureInvertBranch(self.handle, buf, addr, len(data)):
+ return None
+ result = ctypes.create_string_buffer(len(data))
+ ctypes.memmove(result, buf, len(data))
+ return result.raw
+
+[docs] def skip_and_return_value(self, data, addr, value):
+ """
+ ``skip_and_return_value`` reads the instruction(s) in ``data`` at virtual address ``addr`` and returns a string of
+ bytes of the same length which doesn't call and instead *returns a value*.
+
+ :param str data: bytes for the instruction to be converted
+ :param int addr: the virtual address of the instruction to be patched
+ :param int value: value to be returned
+ :return: string containing len(data) bytes which returns ``value`` instead of calling
+ :rtype: str
+ :Example:
+
+ >>> arch.get_instruction_text(arch.skip_and_return_value(arch.assemble("call 10")[0], 0, 0), 0)
+ (['mov ', 'eax', ', ', '0x0'], 5L)
+ >>>
+ """
+ buf = (ctypes.c_ubyte * len(data))()
+ ctypes.memmove(buf, data, len(data))
+ if not core.BNArchitectureSkipAndReturnValue(self.handle, buf, addr, len(data), value):
+ return None
+ result = ctypes.create_string_buffer(len(data))
+ ctypes.memmove(result, buf, len(data))
+ return result.raw
+
+[docs] def get_flag_role(self, flag, sem_class = None):
+ """
+ ``get_flag_role`` gets the role of a given flag.
+
+ :param int flag: flag
+ :param int sem_class: optional semantic flag class
+ :return: flag role
+ :rtype: FlagRole
+ """
+ flag = self.get_flag_index(flag)
+ sem_class = self.get_semantic_flag_class_index(sem_class)
+ return FlagRole(core.BNGetArchitectureFlagRole(self.handle, flag, sem_class))
+
+[docs] def get_flags_required_for_flag_condition(self, cond, sem_class = None):
+ sem_class = self.get_semantic_flag_class_index(sem_class)
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetArchitectureFlagsRequiredForFlagCondition(self.handle, cond, sem_class, count)
+ flag_names = []
+ for i in range(0, count.value):
+ flag_names.append(self._flags_by_index[flags[i]])
+ core.BNFreeRegisterList(flags)
+ return flag_names
+
+
+[docs]class ArchitectureHook(CoreArchitecture):
+[docs] def __init__(self, base_arch):
+ self.base_arch = base_arch
+ super(ArchitectureHook, self).__init__(base_arch.handle)
+
+ # To improve performance of simpler hooks, use null callback for functions that are not being overridden
+ if self.get_associated_arch_by_address.__code__ == CoreArchitecture.get_associated_arch_by_address.__code__:
+ self._cb.getAssociatedArchitectureByAddress = self._cb.getAssociatedArchitectureByAddress.__class__()
+ if self.get_instruction_info.__code__ == CoreArchitecture.get_instruction_info.__code__:
+ self._cb.getInstructionInfo = self._cb.getInstructionInfo.__class__()
+ if self.get_instruction_text.__code__ == CoreArchitecture.get_instruction_text.__code__:
+ self._cb.getInstructionText = self._cb.getInstructionText.__class__()
+ if self.__class__.stack_pointer is None:
+ self._cb.getStackPointerRegister = self._cb.getStackPointerRegister.__class__()
+ if self.__class__.link_reg is None:
+ self._cb.getLinkRegister = self._cb.getLinkRegister.__class__()
+ if len(self.__class__.regs) == 0:
+ self._cb.getRegisterInfo = self._cb.getRegisterInfo.__class__()
+ self._cb.getRegisterName = self._cb.getRegisterName.__class__()
+ if len(self.__class__.reg_stacks) == 0:
+ self._cb.getRegisterStackName = self._cb.getRegisterStackName.__class__()
+ self._cb.getRegisterStackInfo = self._cb.getRegisterStackInfo.__class__()
+ if len(self.__class__.intrinsics) == 0:
+ self._cb.getIntrinsicName = self._cb.getIntrinsicName.__class__()
+ self._cb.getIntrinsicInputs = self._cb.getIntrinsicInputs.__class__()
+ self._cb.freeNameAndTypeList = self._cb.freeNameAndTypeList.__class__()
+ self._cb.getIntrinsicOutputs = self._cb.getIntrinsicOutputs.__class__()
+ self._cb.freeTypeList = self._cb.freeTypeList.__class__()
+
+[docs] def register(self):
+ self.__class__._registered_cb = self._cb
+ self.handle = core.BNRegisterArchitectureHook(self.base_arch.handle, self._cb)
+
+
+
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import highlight
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import BranchType, HighlightColorStyle, HighlightStandardColor, InstructionTextTokenType
+from binaryninja import log
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class BasicBlockEdge(object):
+[docs] def __init__(self, branch_type, source, target, back_edge, fall_through):
+ self.type = branch_type
+ self.source = source
+ self.target = target
+ self.back_edge = back_edge
+ self.fall_through = fall_through
+
+ def __eq__(self, value):
+ if not isinstance(value, BasicBlockEdge):
+ return False
+ return (self.type, self.source, self.target, self.back_edge, self.fall_through) == (value.type, value.source, value.target, value.back_edge, value.fall_through)
+
+ def __hash__(self):
+ return hash((self.type, self.source, self.target, self.back_edge, self.fall_through))
+
+ def __repr__(self):
+ if self.type == BranchType.UnresolvedBranch:
+ return "<%s>" % BranchType(self.type).name
+ elif self.target.arch:
+ return "<%s: %s@%#x>" % (BranchType(self.type).name, self.target.arch.name, self.target.start)
+ else:
+ return "<%s: %#x>" % (BranchType(self.type).name, self.target.start)
+
+
+[docs]class BasicBlock(object):
+[docs] def __init__(self, handle, view = None):
+ self._view = view
+ self.handle = core.handle_of_type(handle, core.BNBasicBlock)
+ self._arch = None
+ self._func = None
+ self._instStarts = None
+ self._instLengths = None
+
+ def __del__(self):
+ core.BNFreeBasicBlock(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, BasicBlock):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, BasicBlock):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __len__(self):
+ return int(core.BNGetBasicBlockLength(self.handle))
+
+ def __repr__(self):
+ arch = self.arch
+ if arch:
+ return "<block: %s@%#x-%#x>" % (arch.name, self.start, self.end)
+ else:
+ return "<block: %#x-%#x>" % (self.start, self.end)
+
+ def __iter__(self):
+ if self._instStarts is None:
+ # don't build the instruction start cache; the object is likely ephemeral
+ idx = self.start
+ while idx < self.end:
+ data = self.view.read(idx, min(self.arch.max_instr_length, self.end - idx))
+ inst_text = self.arch.get_instruction_text(data, idx)
+ if inst_text[1] == 0:
+ break
+ yield inst_text
+ idx += inst_text[1]
+ else:
+ for start, length in zip(self._instStarts, self._instLengths):
+ inst_text = self.arch.get_instruction_text(self.view.read(start, length), start)
+ if inst_text[1] == 0:
+ break
+ yield inst_text
+
+ def __getitem__(self, i):
+ self._buildStartCache()
+ start = self._instStarts[i]
+ length = self._instLengths[i]
+ data = self.view.read(start, length)
+ return self.arch.get_instruction_text(data, start)
+
+ def __hash__(self):
+ return hash((self.start, self.end, self.arch.name))
+
+ def _buildStartCache(self):
+ if self._instStarts is None:
+ # build the instruction start cache
+ self._instLengths = []
+ self._instStarts = []
+ start = self.start
+ while start < self.end:
+ length = self.view.get_instruction_length(start)
+ self._instLengths.append(length)
+ self._instStarts.append(start)
+ start += length
+
+ def _create_instance(self, handle, view):
+ """Internal method used to instantiate child instances"""
+ return BasicBlock(handle, view)
+
+ @property
+ def instruction_count(self):
+ self._buildStartCache()
+ return len(self._instStarts)
+
+ @property
+ def function(self):
+ """Basic block function (read-only)"""
+ if self._func is not None:
+ return self._func
+ func = core.BNGetBasicBlockFunction(self.handle)
+ if func is None:
+ return None
+ self._func = binaryninja.function.Function(self._view, func)
+ return self._func
+
+ @property
+ def view(self):
+ """Binary view that contains the basic block (read-ony)"""
+ if self._view is not None:
+ return self._view
+ self._view = self.function.view
+ return self._view
+
+ @property
+ def arch(self):
+ """Basic block architecture (read-only)"""
+ # The arch for a BasicBlock isn't going to change so just cache
+ # it the first time we need it
+ if self._arch is not None:
+ return self._arch
+ arch = core.BNGetBasicBlockArchitecture(self.handle)
+ if arch is None:
+ return None
+ self._arch = binaryninja.architecture.CoreArchitecture._from_cache(arch)
+ return self._arch
+
+ @property
+ def source_block(self):
+ """Basic block source block (read-only)"""
+ block = core.BNGetBasicBlockSource(self.handle)
+ if block is None:
+ return None
+ return BasicBlock(block, self._view)
+
+ @property
+ def start(self):
+ """Basic block start (read-only)"""
+ return core.BNGetBasicBlockStart(self.handle)
+
+ @property
+ def end(self):
+ """Basic block end (read-only)"""
+ return core.BNGetBasicBlockEnd(self.handle)
+
+ @property
+ def length(self):
+ """Basic block length (read-only)"""
+ return core.BNGetBasicBlockLength(self.handle)
+
+ @property
+ def index(self):
+ """Basic block index in list of blocks for the function (read-only)"""
+ return core.BNGetBasicBlockIndex(self.handle)
+
+ @property
+ def outgoing_edges(self):
+ """List of basic block outgoing edges (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ edges = core.BNGetBasicBlockOutgoingEdges(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ branch_type = BranchType(edges[i].type)
+ if edges[i].target:
+ target = self._create_instance(core.BNNewBasicBlockReference(edges[i].target), self.view)
+ else:
+ target = None
+ result.append(BasicBlockEdge(branch_type, self, target, edges[i].backEdge, edges[i].fallThrough))
+ core.BNFreeBasicBlockEdgeList(edges, count.value)
+ return result
+
+ @property
+ def incoming_edges(self):
+ """List of basic block incoming edges (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ edges = core.BNGetBasicBlockIncomingEdges(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ branch_type = BranchType(edges[i].type)
+ if edges[i].target:
+ target = self._create_instance(core.BNNewBasicBlockReference(edges[i].target), self.view)
+ else:
+ target = None
+ result.append(BasicBlockEdge(branch_type, target, self, edges[i].backEdge, edges[i].fallThrough))
+ core.BNFreeBasicBlockEdgeList(edges, count.value)
+ return result
+
+ @property
+ def has_undetermined_outgoing_edges(self):
+ """Whether basic block has undetermined outgoing edges (read-only)"""
+ return core.BNBasicBlockHasUndeterminedOutgoingEdges(self.handle)
+
+ @property
+ def can_exit(self):
+ """Whether basic block can return or is tagged as 'No Return' (read-only)"""
+ return core.BNBasicBlockCanExit(self.handle)
+
+ @property
+ def dominators(self):
+ """List of dominators for this basic block (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetBasicBlockDominators(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def strict_dominators(self):
+ """List of strict dominators for this basic block (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetBasicBlockStrictDominators(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def immediate_dominator(self):
+ """Immediate dominator of this basic block (read-only)"""
+ result = core.BNGetBasicBlockImmediateDominator(self.handle)
+ if not result:
+ return None
+ return self._create_instance(result, self.view)
+
+ @property
+ def dominator_tree_children(self):
+ """List of child blocks in the dominator tree for this basic block (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetBasicBlockDominatorTreeChildren(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def dominance_frontier(self):
+ """Dominance frontier for this basic block (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetBasicBlockDominanceFrontier(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def annotations(self):
+ """List of automatic annotations for the start of this block (read-only)"""
+ return self.function.get_block_annotations(self.start, self.arch)
+
+ @property
+ def disassembly_text(self):
+ """
+ ``disassembly_text`` property which returns a list of binaryninja.function.DisassemblyTextLine objects for the current basic block.
+ :Example:
+
+ >>> current_basic_block.disassembly_text
+ [<0x100000f30: _main:>, ...]
+ """
+ return self.get_disassembly_text()
+
+ @property
+ def highlight(self):
+ """Gets or sets the highlight color for basic block
+
+ :Example:
+
+ >>> current_basic_block.highlight = HighlightStandardColor.BlueHighlightColor
+ >>> current_basic_block.highlight
+ <color: blue>
+ """
+ return highlight.HighlightColor._from_core_struct(core.BNGetBasicBlockHighlight(self.handle))
+
+ @highlight.setter
+ def highlight(self, value):
+ self.set_user_highlight(value)
+
+ @property
+ def is_il(self):
+ """Whether the basic block contains IL"""
+ return core.BNIsILBasicBlock(self.handle)
+
+ @property
+ def is_low_level_il(self):
+ """Whether the basic block contains Low Level IL"""
+ return core.BNIsLowLevelILBasicBlock(self.handle)
+
+ @property
+ def is_medium_level_il(self):
+ """Whether the basic block contains Medium Level IL"""
+ return core.BNIsMediumLevelILBasicBlock(self.handle)
+
+[docs] @classmethod
+ def get_iterated_dominance_frontier(self, blocks):
+ if len(blocks) == 0:
+ return []
+ block_set = (ctypes.POINTER(core.BNBasicBlock) * len(blocks))()
+ for i in range(len(blocks)):
+ block_set[i] = blocks[i].handle
+ count = ctypes.c_ulonglong()
+ out_blocks = core.BNGetBasicBlockIteratedDominanceFrontier(block_set, len(blocks), count)
+ result = []
+ for i in range(0, count.value):
+ result.append(BasicBlock(core.BNNewBasicBlockReference(out_blocks[i]), blocks[0].view))
+ core.BNFreeBasicBlockList(out_blocks, count.value)
+ return result
+
+
+
+[docs] def get_disassembly_text(self, settings=None):
+ """
+ ``get_disassembly_text`` returns a list of binaryninja.function.DisassemblyTextLine objects for the current basic block.
+
+ :param DisassemblySettings settings: (optional) DisassemblySettings object
+ :Example:
+
+ >>> current_basic_block.get_disassembly_text()
+ [<0x100000f30: _main:>, <0x100000f30: push rbp>, ... ]
+ """
+ settings_obj = None
+ if settings:
+ settings_obj = settings.handle
+
+ count = ctypes.c_ulonglong()
+ lines = core.BNGetBasicBlockDisassemblyText(self.handle, settings_obj, count)
+ result = []
+ for i in range(0, count.value):
+ addr = lines[i].addr
+ if (lines[i].instrIndex != 0xffffffffffffffff) and hasattr(self, 'il_function'):
+ il_instr = self.il_function[lines[i].instrIndex]
+ else:
+ il_instr = None
+ color = highlight.HighlightColor._from_core_struct(lines[i].highlight)
+ tokens = binaryninja.function.InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count)
+ result.append(binaryninja.function.DisassemblyTextLine(tokens, addr, il_instr, color))
+ core.BNFreeDisassemblyTextLines(lines, count.value)
+ return result
+
+[docs] def set_auto_highlight(self, color):
+ """
+ ``set_auto_highlight`` highlights the current BasicBlock with the supplied color.
+
+ .. warning:: Use only in analysis plugins. Do not use in regular plugins, as colors won't be saved to the database.
+
+ :param HighlightStandardColor or highlight.HighlightColor color: Color value to use for highlighting
+ """
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ core.BNSetAutoBasicBlockHighlight(self.handle, color._get_core_struct())
+
+[docs] def set_user_highlight(self, color):
+ """
+ ``set_user_highlight`` highlights the current BasicBlock with the supplied color
+
+ :param HighlightStandardColor or highlight.HighlightColor color: Color value to use for highlighting
+ :Example:
+
+ >>> current_basic_block.set_user_highlight(highlight.HighlightColor(red=0xff, blue=0xff, green=0))
+ >>> current_basic_block.set_user_highlight(HighlightStandardColor.BlueHighlightColor)
+ """
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ core.BNSetUserBasicBlockHighlight(self.handle, color._get_core_struct())
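+
+
+ # Illustrative sketch (not part of the original module): walking a function's
+ # control-flow graph with the BasicBlock API above. 'func' is assumed to be a
+ # binaryninja.function.Function; iterating a block yields the
+ # (token list, length) tuples produced by __iter__, and outgoing_edges yields
+ # BasicBlockEdge objects.
+ def _example_walk_blocks(func):
+     for block in func.basic_blocks:
+         print("block %#x-%#x, %d instructions" % (block.start, block.end, block.instruction_count))
+         for tokens, length in block:
+             print("    " + "".join(token.text for token in tokens))
+         for edge in block.outgoing_edges:
+             print("    outgoing edge: %r" % edge)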
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import struct
+import traceback
+import ctypes
+import abc
+import numbers
+
+from collections import OrderedDict
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import (AnalysisState, SymbolType, InstructionTextTokenType,
+ Endianness, ModificationStatus, StringType, SegmentFlag, SectionSemantics, FindFlag, TypeClass)
+import binaryninja
+from binaryninja import associateddatastore # required for _BinaryViewAssociatedDataStore
+from binaryninja import log
+from binaryninja import types
+from binaryninja import fileaccessor
+from binaryninja import databuffer
+from binaryninja import basicblock
+from binaryninja import lineardisassembly
+from binaryninja import metadata
+from binaryninja import highlight
+from binaryninja import function
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+
+
+[docs]class BinaryDataNotification(object):
+ # Hook methods are no-ops by default; subclasses override the events of
+ # interest. Signatures follow the callbacks in BinaryDataNotificationCallbacks below.
+ def data_written(self, view, offset, length):
+ pass
+
+ def data_inserted(self, view, offset, length):
+ pass
+
+ def data_removed(self, view, offset, length):
+ pass
+
+ def function_added(self, view, func):
+ pass
+
+ def function_removed(self, view, func):
+ pass
+
+ def function_updated(self, view, func):
+ pass
+
+ def function_update_requested(self, view, func):
+ pass
+
+ def data_var_added(self, view, var):
+ pass
+
+ def data_var_removed(self, view, var):
+ pass
+
+ def data_var_updated(self, view, var):
+ pass
+
+ def string_found(self, view, string_type, offset, length):
+ pass
+
+ def string_removed(self, view, string_type, offset, length):
+ pass
+
+ def type_defined(self, view, name, type):
+ pass
+
+ def type_undefined(self, view, name, type):
+ pass
+
+
+[docs]class StringReference(object):
+[docs] def __init__(self, bv, string_type, start, length):
+ self.type = string_type
+ self.start = start
+ self.length = length
+ self.view = bv
+
+ @property
+ def value(self):
+ return binaryninja.pyNativeStr(self.view.read(self.start, self.length))
+
+ def __repr__(self):
+ return "<%s: %#x, len %#x>" % (self.type, self.start, self.length)
+
+
+_pending_analysis_completion_events = {}
+[docs]class AnalysisCompletionEvent(object):
+ """
+ The ``AnalysisCompletionEvent`` object provides an asynchronous mechanism for receiving
+ callbacks when analysis is complete. The callback runs once. A completion event must be added
+ for each new analysis in order to be notified of each analysis completion. The
+ AnalysisCompletionEvent class takes responsibility for keeping track of the object's lifetime.
+
+ :Example:
+ >>> def on_complete(self):
+ ... print("Analysis Complete", self.view)
+ ...
+ >>> evt = AnalysisCompletionEvent(bv, on_complete)
+ >>>
+ """
+[docs] def __init__(self, view, callback):
+ self.view = view
+ self.callback = callback
+ self._cb = ctypes.CFUNCTYPE(None, ctypes.c_void_p)(self._notify)
+ self.handle = core.BNAddAnalysisCompletionEvent(self.view.handle, None, self._cb)
+ global _pending_analysis_completion_events
+ _pending_analysis_completion_events[id(self)] = self
+
+ def __del__(self):
+ global _pending_analysis_completion_events
+ if id(self) in _pending_analysis_completion_events:
+ del _pending_analysis_completion_events[id(self)]
+ core.BNFreeAnalysisCompletionEvent(self.handle)
+
+ def _notify(self, ctxt):
+ global _pending_analysis_completion_events
+ if id(self) in _pending_analysis_completion_events:
+ del _pending_analysis_completion_events[id(self)]
+ try:
+ self.callback(self)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _empty_callback(self):
+ pass
+
+[docs] def cancel(self):
+ """
+ .. warning:: This method should only be used when the system is being
+ shut down and no further analysis should be done afterward.
+ """
+ self.callback = self._empty_callback
+ core.BNCancelAnalysisCompletionEvent(self.handle)
+ global _pending_analysis_completion_events
+ if id(self) in _pending_analysis_completion_events:
+ del _pending_analysis_completion_events[id(self)]
+
+
+[docs]class ActiveAnalysisInfo(object):
+[docs] def __init__(self, func, analysis_time, update_count, submit_count):
+ self.func = func
+ self.analysis_time = analysis_time
+ self.update_count = update_count
+ self.submit_count = submit_count
+
+ def __repr__(self):
+ return "<ActiveAnalysisInfo %s, analysis_time %d, update_count %d, submit_count %d>" % (self.func, self.analysis_time, self.update_count, self.submit_count)
+
+
+[docs]class AnalysisInfo(object):
+[docs] def __init__(self, state, analysis_time, active_info):
+ self.state = AnalysisState(state)
+ self.analysis_time = analysis_time
+ self.active_info = active_info
+
+ def __repr__(self):
+ return "<AnalysisInfo %s, analysis_time %d, active_info %s>" % (self.state, self.analysis_time, self.active_info)
+
+
+[docs]class AnalysisProgress(object):
+[docs] def __init__(self, state, count, total):
+ self.state = state
+ self.count = count
+ self.total = total
+
+ def __str__(self):
+ if self.state == AnalysisState.DisassembleState:
+ return "Disassembling (%d/%d)" % (self.count, self.total)
+ if self.state == AnalysisState.AnalyzeState:
+ return "Analyzing (%d/%d)" % (self.count, self.total)
+ if self.state == AnalysisState.ExtendedAnalyzeState:
+ return "Extended Analysis"
+ return "Idle"
+
+ def __repr__(self):
+ return "<progress: %s>" % str(self)
+
+
+[docs]class DataVariable(object):
+[docs] def __init__(self, addr, var_type, auto_discovered, view=None):
+ self.address = addr
+ self.type = var_type
+ self.auto_discovered = auto_discovered
+ self.view = view
+
+ @property
+ def data_refs_from(self):
+ """data cross references from this data variable (read-only)"""
+ return self.view.get_data_refs_from(self.address, len(self.type))
+
+ @property
+ def data_refs(self):
+ """data cross references to this data variable (read-only)"""
+ return self.view.get_data_refs(self.address, len(self.type))
+
+ @property
+ def code_refs(self):
+ """code references to this data variable (read-only)"""
+ return self.view.get_code_refs(self.address, len(self.type))
+
+ def __len__(self):
+ return len(self.type)
+
+ def __repr__(self):
+ return "<var 0x%x: %s>" % (self.address, str(self.type))
+
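+
+ # Illustrative sketch (not part of the original module): enumerating the data
+ # variables of a view and their cross references. 'bv' is assumed to be an
+ # open, analyzed BinaryView; bv.data_vars (defined later in this module) maps
+ # addresses to DataVariable objects.
+ def _example_list_data_vars(bv):
+     for addr, var in sorted(bv.data_vars.items()):
+         print("%#x: %s, %d code refs" % (addr, str(var.type), len(var.code_refs)))
+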
+
+[docs]class BinaryDataNotificationCallbacks(object):
+[docs] def __init__(self, view, notify):
+ self.view = view
+ self.notify = notify
+ self._cb = core.BNBinaryDataNotification()
+ self._cb.context = 0
+ self._cb.dataWritten = self._cb.dataWritten.__class__(self._data_written)
+ self._cb.dataInserted = self._cb.dataInserted.__class__(self._data_inserted)
+ self._cb.dataRemoved = self._cb.dataRemoved.__class__(self._data_removed)
+ self._cb.functionAdded = self._cb.functionAdded.__class__(self._function_added)
+ self._cb.functionRemoved = self._cb.functionRemoved.__class__(self._function_removed)
+ self._cb.functionUpdated = self._cb.functionUpdated.__class__(self._function_updated)
+ self._cb.functionUpdateRequested = self._cb.functionUpdateRequested.__class__(self._function_update_requested)
+ self._cb.dataVariableAdded = self._cb.dataVariableAdded.__class__(self._data_var_added)
+ self._cb.dataVariableRemoved = self._cb.dataVariableRemoved.__class__(self._data_var_removed)
+ self._cb.dataVariableUpdated = self._cb.dataVariableUpdated.__class__(self._data_var_updated)
+ self._cb.stringFound = self._cb.stringFound.__class__(self._string_found)
+ self._cb.stringRemoved = self._cb.stringRemoved.__class__(self._string_removed)
+ self._cb.typeDefined = self._cb.typeDefined.__class__(self._type_defined)
+ self._cb.typeUndefined = self._cb.typeUndefined.__class__(self._type_undefined)
+
+ def _register(self):
+ core.BNRegisterDataNotification(self.view.handle, self._cb)
+
+ def _unregister(self):
+ core.BNUnregisterDataNotification(self.view.handle, self._cb)
+
+ def _data_written(self, ctxt, view, offset, length):
+ try:
+ self.notify.data_written(self.view, offset, length)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _data_inserted(self, ctxt, view, offset, length):
+ try:
+ self.notify.data_inserted(self.view, offset, length)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _data_removed(self, ctxt, view, offset, length):
+ try:
+ self.notify.data_removed(self.view, offset, length)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _function_added(self, ctxt, view, func):
+ try:
+ self.notify.function_added(self.view, binaryninja.function.Function(self.view, core.BNNewFunctionReference(func)))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _function_removed(self, ctxt, view, func):
+ try:
+ self.notify.function_removed(self.view, binaryninja.function.Function(self.view, core.BNNewFunctionReference(func)))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _function_updated(self, ctxt, view, func):
+ try:
+ self.notify.function_updated(self.view, binaryninja.function.Function(self.view, core.BNNewFunctionReference(func)))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _function_update_requested(self, ctxt, view, func):
+ try:
+ self.notify.function_update_requested(self.view, binaryninja.function.Function(self.view, core.BNNewFunctionReference(func)))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _data_var_added(self, ctxt, view, var):
+ try:
+ address = var[0].address
+ var_type = types.Type(core.BNNewTypeReference(var[0].type), platform = self.view.platform, confidence = var[0].typeConfidence)
+ auto_discovered = var[0].autoDiscovered
+ self.notify.data_var_added(self.view, DataVariable(address, var_type, auto_discovered, self.view))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _data_var_removed(self, ctxt, view, var):
+ try:
+ address = var[0].address
+ var_type = types.Type(core.BNNewTypeReference(var[0].type), platform = self.view.platform, confidence = var[0].typeConfidence)
+ auto_discovered = var[0].autoDiscovered
+ self.notify.data_var_removed(self.view, DataVariable(address, var_type, auto_discovered, self.view))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _data_var_updated(self, ctxt, view, var):
+ try:
+ address = var[0].address
+ var_type = types.Type(core.BNNewTypeReference(var[0].type), platform = self.view.platform, confidence = var[0].typeConfidence)
+ auto_discovered = var[0].autoDiscovered
+ self.notify.data_var_updated(self.view, DataVariable(address, var_type, auto_discovered, self.view))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _string_found(self, ctxt, view, string_type, offset, length):
+ try:
+ self.notify.string_found(self.view, StringType(string_type), offset, length)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _string_removed(self, ctxt, view, string_type, offset, length):
+ try:
+ self.notify.string_removed(self.view, StringType(string_type), offset, length)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _type_defined(self, ctxt, view, name, type_obj):
+ try:
+ qualified_name = types.QualifiedName._from_core_struct(name[0])
+ self.notify.type_defined(self.view, qualified_name, types.Type(core.BNNewTypeReference(type_obj), platform = self.view.platform))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _type_undefined(self, ctxt, view, name, type_obj):
+ try:
+ qualified_name = types.QualifiedName._from_core_struct(name[0])
+ self.notify.type_undefined(self.view, qualified_name, types.Type(core.BNNewTypeReference(type_obj), platform = self.view.platform))
+ except:
+ log.log_error(traceback.format_exc())
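+
+
+ # Illustrative sketch (not part of the original module): receiving analysis
+ # notifications. The hooks on BinaryDataNotification are no-ops by default, so
+ # a subclass only overrides the events it cares about and is then attached to
+ # a view with register_notification(). 'bv' is assumed to be an open
+ # BinaryView; FunctionWatcher and _example_watch_view are hypothetical names.
+ def _example_watch_view(bv):
+     class FunctionWatcher(BinaryDataNotification):
+         def function_added(self, view, func):
+             log.log_info("function added at %#x" % func.start)
+
+         def data_written(self, view, offset, length):
+             log.log_info("%d bytes written at %#x" % (length, offset))
+
+     bv.register_notification(FunctionWatcher())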
+
+
+class _BinaryViewTypeMetaclass(type):
+
+ @property
+ def list(self):
+ """List all BinaryView types (read-only)"""
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ types = core.BNGetBinaryViewTypes(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(BinaryViewType(types[i]))
+ core.BNFreeBinaryViewTypeList(types)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ types = core.BNGetBinaryViewTypes(count)
+ try:
+ for i in range(0, count.value):
+ yield BinaryViewType(types[i])
+ finally:
+ core.BNFreeBinaryViewTypeList(types)
+
+ def __getitem__(self, value):
+ binaryninja._init_plugins()
+ view_type = core.BNGetBinaryViewTypeByName(str(value))
+ if view_type is None:
+ raise KeyError("'%s' is not a valid view type" % str(value))
+ return BinaryViewType(view_type)
+
+
+[docs]class BinaryViewType(with_metaclass(_BinaryViewTypeMetaclass, object)):
+
+[docs] def __init__(self, handle):
+ self.handle = core.handle_of_type(handle, core.BNBinaryViewType)
+
+ def __eq__(self, value):
+ if not isinstance(value, BinaryViewType):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, BinaryViewType):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+ @property
+ def name(self):
+ """BinaryView name (read-only)"""
+ return core.BNGetBinaryViewTypeName(self.handle)
+
+ @property
+ def long_name(self):
+ """BinaryView long name (read-only)"""
+ return core.BNGetBinaryViewTypeLongName(self.handle)
+
+ def __repr__(self):
+ return "<view type: '%s'>" % self.name
+
+[docs] def create(self, data):
+ view = core.BNCreateBinaryViewOfType(self.handle, data.handle)
+ if view is None:
+ return None
+ return BinaryView(file_metadata=data.file, handle=view)
+
+[docs] def open(self, src, file_metadata=None):
+ data = BinaryView.open(src, file_metadata)
+ if data is None:
+ return None
+ return self.create(data)
+
+[docs] @classmethod
+ def get_view_of_file(cls, filename, update_analysis=True, progress_func=None):
+ """
+ ``get_view_of_file`` opens and returns the first available non-Raw ``BinaryView`` for the given file.
+
+ :param str filename: Path to filename or bndb
+ :param bool update_analysis: defaults to True. Pass False to not run update_analysis_and_wait.
+ :param callable() progress_func: optional function to be called with the current progress and total count.
+ :return: returns a BinaryView object for the given filename.
+ :rtype: BinaryView or None
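+
+ An illustrative session (the path and the printed view are examples only):
+
+ :Example:
+
+ >>> bv = BinaryViewType.get_view_of_file("/bin/ls")
+ >>> bv
+ <BinaryView: '/bin/ls', start 0x100000000, len 0xa000>
+ >>>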
+ """
+ sqlite = b"SQLite format 3"
+ if filename.endswith(".bndb"):
+ f = open(filename, 'rb')
+ if f is None or f.read(len(sqlite)) != sqlite:
+ return None
+ f.close()
+ view = binaryninja.filemetadata.FileMetadata().open_existing_database(filename, progress_func)
+ else:
+ view = BinaryView.open(filename)
+
+ if view is None:
+ return None
+ for available in view.available_view_types:
+ if available.name != "Raw":
+ if filename.endswith(".bndb"):
+ bv = view.get_view_of_type(available.name)
+ else:
+ bv = cls[available.name].open(filename)
+
+ if bv is None:
+ raise Exception("Unknown Architecture/Architecture Not Found (check plugins folder)")
+
+ if update_analysis:
+ bv.update_analysis_and_wait()
+ return bv
+ return None
+
+[docs] def is_valid_for_data(self, data):
+ return core.BNIsBinaryViewTypeValidForData(self.handle, data.handle)
+
+[docs] def register_arch(self, ident, endian, arch):
+ core.BNRegisterArchitectureForViewType(self.handle, ident, endian, arch.handle)
+
+[docs] def get_arch(self, ident, endian):
+ arch = core.BNGetArchitectureForViewType(self.handle, ident, endian)
+ if arch is None:
+ return None
+ return binaryninja.architecture.CoreArchitecture._from_cache(arch)
+
+[docs] def register_platform(self, ident, arch, plat):
+ core.BNRegisterPlatformForViewType(self.handle, ident, arch.handle, plat.handle)
+
+[docs] def register_default_platform(self, arch, plat):
+ core.BNRegisterDefaultPlatformForViewType(self.handle, arch.handle, plat.handle)
+
+[docs] def get_platform(self, ident, arch):
+ plat = core.BNGetPlatformForViewType(self.handle, ident, arch.handle)
+ if plat is None:
+ return None
+ return binaryninja.platform.Platform(handle = plat)
+
+
+[docs]class Segment(object):
+
+
+ @property
+ def start(self):
+ return core.BNSegmentGetStart(self.handle)
+
+ @property
+ def end(self):
+ return core.BNSegmentGetEnd(self.handle)
+
+ @property
+ def executable(self):
+ return (core.BNSegmentGetFlags(self.handle) & SegmentFlag.SegmentExecutable) != 0
+
+ @property
+ def writable(self):
+ return (core.BNSegmentGetFlags(self.handle) & SegmentFlag.SegmentWritable) != 0
+
+ @property
+ def readable(self):
+ return (core.BNSegmentGetFlags(self.handle) & SegmentFlag.SegmentReadable) != 0
+
+ @property
+ def data_length(self):
+ return core.BNSegmentGetDataLength(self.handle)
+
+ @property
+ def data_offset(self):
+ return core.BNSegmentGetDataOffset(self.handle)
+
+ @property
+ def data_end(self):
+ return core.BNSegmentGetDataEnd(self.handle)
+
+ @property
+ def relocation_count(self):
+ return core.BNSegmentGetRelocationsCount(self.handle)
+
+ @property
+ def relocation_ranges(self):
+ """List of relocation range tuples (read-only)"""
+
+ count = ctypes.c_ulonglong()
+ ranges = core.BNSegmentGetRelocationRanges(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append((ranges[i].start, ranges[i].end))
+ core.BNFreeRelocationRanges(ranges, count)
+ return result
+
+[docs] def relocation_ranges_at(self, addr):
+ """List of relocation range tuples (read-only)"""
+
+ count = ctypes.c_ulonglong()
+ ranges = core.BNSegmentGetRelocationRangesAtAddress(self.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append((ranges[i].start, ranges[i].end))
+ core.BNFreeRelocationRanges(ranges, count)
+ return result
+
+ def __del__(self):
+ core.BNFreeSegment(self.handle)
+
+ def __eq__(self, other):
+ if not isinstance(other, Segment):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
+
+ def __ne__(self, other):
+ if not isinstance(other, Segment):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(other.handle.contents)
+
+ def __hash__(self):
+ return hash(self.handle.contents)
+
+ def __len__(self):
+ return core.BNSegmentGetLength(self.handle)
+
+ def __repr__(self):
+ return "<segment: %#x-%#x, %s%s%s>" % (self.start, self.end,
+ "r" if self.readable else "-",
+ "w" if self.writable else "-",
+ "x" if self.executable else "-")
+
+
+[docs]class Section(object):
+
+
+ @property
+ def name(self):
+ return core.BNSectionGetName(self.handle)
+
+ @property
+ def type(self):
+ return core.BNSectionGetType(self.handle)
+
+ @property
+ def start(self):
+ return core.BNSectionGetStart(self.handle)
+
+ @property
+ def linked_section(self):
+ return core.BNSectionGetLinkedSection(self.handle)
+
+ @property
+ def info_section(self):
+ return core.BNSectionGetInfoSection(self.handle)
+
+ @property
+ def info_data(self):
+ return core.BNSectionGetInfoData(self.handle)
+
+ @property
+ def align(self):
+ return core.BNSectionGetAlign(self.handle)
+
+ @property
+ def entry_size(self):
+ return core.BNSectionGetEntrySize(self.handle)
+
+ @property
+ def semantics(self):
+ return SectionSemantics(core.BNSectionGetSemantics(self.handle))
+
+ @property
+ def auto_defined(self):
+ return core.BNSectionIsAutoDefined(self.handle)
+
+ @property
+ def end(self):
+ return self.start + len(self)
+
+ def __del__(self):
+ core.BNFreeSection(self.handle)
+
+ def __eq__(self, other):
+ if not isinstance(other, Section):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
+
+ def __ne__(self, other):
+ if not isinstance(other, Section):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(other.handle.contents)
+
+ def __hash__(self):
+ return hash(self.handle.contents)
+
+ def __len__(self):
+ return core.BNSectionGetLength(self.handle)
+
+ def __repr__(self):
+ return "<section %s: %#x-%#x>" % (self.name, self.start, self.end)
+
+
+[docs]class AddressRange(object):
+ def __init__(self, start, end):
+ self.start = start
+ self.end = end
+
+ @property
+ def length(self):
+ return self.end - self.start
+
+ def __len__(self):
+ return self.end - self.start
+
+ def __repr__(self):
+ return "<%#x-%#x>" % (self.start, self.end)
+
+
+class _BinaryViewAssociatedDataStore(associateddatastore._AssociatedDataStore):
+ _defaults = {}
+
+
+[docs]class BinaryView(object):
+ """
+ ``class BinaryView`` implements a view on binary data, and presents a queryable interface of a binary file. One key
+ job of BinaryView is file format parsing which allows Binary Ninja to read, write, insert, remove portions
+ of the file given a virtual address. For the purposes of this documentation we define a virtual address as the
+ memory address that the various pieces of the physical file will be loaded at.
+
+ A binary file does not have to have just one BinaryView, thus much of the interface to manipulate disassembly exists
+ within or is accessed through a BinaryView. All files are guaranteed to have at least the ``Raw`` BinaryView. The
+ ``Raw`` BinaryView is simply a hex editor, but is helpful for manipulating binary files via their absolute addresses.
+
+ BinaryViews are plugins and thus registered with Binary Ninja at startup; they should **never** be instantiated
+ directly as this is already done. The list of available BinaryViews can be seen in the BinaryViewType class which
+ provides an iterator and map of the various installed BinaryViews::
+
+ >>> list(BinaryViewType)
+ [<view type: 'Raw'>, <view type: 'ELF'>, <view type: 'Mach-O'>, <view type: 'PE'>]
+ >>> BinaryViewType['ELF']
+ <view type: 'ELF'>
+
+ To open a file with a given BinaryView the following code can be used::
+
+ >>> bv = BinaryViewType['Mach-O'].open("/bin/ls")
+ >>> bv
+ <BinaryView: '/bin/ls', start 0x100000000, len 0xa000>
+
+ `By convention in the rest of this document we will use bv to mean an open BinaryView of an executable file.`
+ When a BinaryView is open on an executable view, analysis does not automatically run, this can be done by running
+ the ``update_analysis_and_wait()`` method which disassembles the executable and returns when all disassembly is
+ finished::
+
+ >>> bv.update_analysis_and_wait()
+ >>>
+
+ Since BinaryNinja's analysis is multi-threaded (depending on version) this can also be done in the background by
+ using the ``update_analysis()`` method instead.
+
+ By standard python convention methods which start with '_' should be considered private and should not be called
+ externally. Additionally, methods which begin with ``perform_`` should not be called either and are
+ used explicitly for subclassing the BinaryView.
+
+ .. note:: An important note on the ``*_user_*()`` methods. Binary Ninja makes a distinction between edits \
+ performed by the user and actions performed by auto analysis. Auto analysis actions that can quickly be recalculated \
+ are not saved to the database. Auto analysis actions that take a long time and all user edits are stored in the \
+ database (e.g. ``remove_user_function()`` rather than ``remove_function()``). Thus use ``_user_`` methods if saving \
+ to the database is desired.
+ """
+ name = None
+ long_name = None
+ _registered = False
+ _registered_cb = None
+ registered_view_type = None
+ next_address = 0
+ _associated_data = {}
+ _registered_instances = []
+
+[docs] def __init__(self, file_metadata=None, parent_view=None, handle=None):
+ self._must_free = True
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNBinaryView)
+ if file_metadata is None:
+ self.file = binaryninja.filemetadata.FileMetadata(handle=core.BNGetFileForView(handle))
+ else:
+ self.file = file_metadata
+ elif self.__class__ is BinaryView:
+ binaryninja._init_plugins()
+ if file_metadata is None:
+ file_metadata = binaryninja.filemetadata.FileMetadata()
+ self.handle = core.BNCreateBinaryDataView(file_metadata.handle)
+ self.file = binaryninja.filemetadata.FileMetadata(handle=core.BNNewFileReference(file_metadata.handle))
+ else:
+ binaryninja._init_plugins()
+ if not self.__class__._registered:
+ raise TypeError("view type not registered")
+ self._cb = core.BNCustomBinaryView()
+ self._cb.context = 0
+ self._cb.init = self._cb.init.__class__(self._init)
+ self._cb.freeObject = self._cb.freeObject.__class__(self._free_object)
+ self._cb.read = self._cb.read.__class__(self._read)
+ self._cb.write = self._cb.write.__class__(self._write)
+ self._cb.insert = self._cb.insert.__class__(self._insert)
+ self._cb.remove = self._cb.remove.__class__(self._remove)
+ self._cb.getModification = self._cb.getModification.__class__(self._get_modification)
+ self._cb.isValidOffset = self._cb.isValidOffset.__class__(self._is_valid_offset)
+ self._cb.isOffsetReadable = self._cb.isOffsetReadable.__class__(self._is_offset_readable)
+ self._cb.isOffsetWritable = self._cb.isOffsetWritable.__class__(self._is_offset_writable)
+ self._cb.isOffsetExecutable = self._cb.isOffsetExecutable.__class__(self._is_offset_executable)
+ self._cb.getNextValidOffset = self._cb.getNextValidOffset.__class__(self._get_next_valid_offset)
+ self._cb.getStart = self._cb.getStart.__class__(self._get_start)
+ self._cb.getLength = self._cb.getLength.__class__(self._get_length)
+ self._cb.getEntryPoint = self._cb.getEntryPoint.__class__(self._get_entry_point)
+ self._cb.isExecutable = self._cb.isExecutable.__class__(self._is_executable)
+ self._cb.getDefaultEndianness = self._cb.getDefaultEndianness.__class__(self._get_default_endianness)
+ self._cb.isRelocatable = self._cb.isRelocatable.__class__(self._is_relocatable)
+ self._cb.getAddressSize = self._cb.getAddressSize.__class__(self._get_address_size)
+ self._cb.save = self._cb.save.__class__(self._save)
+ self.file = file_metadata
+ if parent_view is not None:
+ parent_view = parent_view.handle
+ self.handle = core.BNCreateCustomBinaryView(self.__class__.name, file_metadata.handle, parent_view, self._cb)
+ self.__class__._registered_instances.append(self)
+ self._must_free = False
+ self.notifications = {}
+ self.next_address = None # Do NOT try to access view before init() is called, use placeholder
+
+ def __eq__(self, value):
+ if not isinstance(value, BinaryView):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, BinaryView):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+[docs] @classmethod
+ def register(cls):
+ binaryninja._init_plugins()
+ if cls.name is None:
+ raise ValueError("view 'name' not defined")
+ if cls.long_name is None:
+ cls.long_name = cls.name
+ cls._registered_cb = core.BNCustomBinaryViewType()
+ cls._registered_cb.context = 0
+ cls._registered_cb.create = cls._registered_cb.create.__class__(cls._create)
+ cls._registered_cb.isValidForData = cls._registered_cb.isValidForData.__class__(cls._is_valid_for_data)
+ cls.registered_view_type = BinaryViewType(core.BNRegisterBinaryViewType(cls.name, cls.long_name, cls._registered_cb))
+ cls._registered = True
+
+ @classmethod
+ def _create(cls, ctxt, data):
+ try:
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle=core.BNGetFileForView(data))
+ view = cls(BinaryView(file_metadata=file_metadata, handle=core.BNNewViewReference(data)))
+ if view is None:
+ return None
+ return ctypes.cast(core.BNNewViewReference(view.handle), ctypes.c_void_p).value
+ except:
+ log.log_error(traceback.format_exc())
+ return None
+
+ @classmethod
+ def _is_valid_for_data(cls, ctxt, data):
+ try:
+ return cls.is_valid_for_data(BinaryView(handle=core.BNNewViewReference(data)))
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+[docs] @classmethod
+ def open(cls, src, file_metadata=None):
+ binaryninja._init_plugins()
+ if isinstance(src, fileaccessor.FileAccessor):
+ if file_metadata is None:
+ file_metadata = binaryninja.filemetadata.FileMetadata()
+ view = core.BNCreateBinaryDataViewFromFile(file_metadata.handle, src._cb)
+ else:
+ if file_metadata is None:
+ file_metadata = binaryninja.filemetadata.FileMetadata(str(src))
+ view = core.BNCreateBinaryDataViewFromFilename(file_metadata.handle, str(src))
+ if view is None:
+ return None
+ result = BinaryView(file_metadata=file_metadata, handle=view)
+ return result
+
+[docs] @classmethod
+ def new(cls, data=None, file_metadata=None):
+ binaryninja._init_plugins()
+ if file_metadata is None:
+ file_metadata = binaryninja.filemetadata.FileMetadata()
+ if data is None:
+ view = core.BNCreateBinaryDataView(file_metadata.handle)
+ else:
+ buf = databuffer.DataBuffer(data)
+ view = core.BNCreateBinaryDataViewFromBuffer(file_metadata.handle, buf.handle)
+ if view is None:
+ return None
+ result = BinaryView(file_metadata=file_metadata, handle=view)
+ return result
+
+ @classmethod
+ def _unregister(cls, view):
+ handle = ctypes.cast(view, ctypes.c_void_p)
+ if handle.value in cls._associated_data:
+ del cls._associated_data[handle.value]
+
+[docs] @classmethod
+ def set_default_session_data(cls, name, value):
+ """
+ ``set_default_session_data`` saves a variable to the BinaryView.
+ :param name: name of the variable to be saved
+ :param value: value of the variable to be saved
+
+ :Example:
+ >>> BinaryView.set_default_session_data("variable_name", "value")
+ >>> bv.session_data.variable_name
+ 'value'
+ """
+ _BinaryViewAssociatedDataStore.set_default(name, value)
+
+ @property
+ def basic_blocks(self):
+ """A generator of all BasicBlock objects in the BinaryView"""
+ for func in self:
+ for block in func.basic_blocks:
+ yield block
+
+ @property
+ def llil_basic_blocks(self):
+ """A generator of all LowLevelILBasicBlock objects in the BinaryView"""
+ for func in self:
+ for il_block in func.low_level_il.basic_blocks:
+ yield il_block
+
+ @property
+ def mlil_basic_blocks(self):
+ """A generator of all MediumLevelILBasicBlock objects in the BinaryView"""
+ for func in self:
+ for il_block in func.mlil.basic_blocks:
+ yield il_block
+
+ @property
+ def instructions(self):
+ """A generator of instruction tokens and their start addresses"""
+ for block in self.basic_blocks:
+ start = block.start
+ for i in block:
+ yield (i[0], start)
+ start += i[1]
+
+ @property
+ def llil_instructions(self):
+ """A generator of llil instructions"""
+ for block in self.llil_basic_blocks:
+ for i in block:
+ yield i
+
+ @property
+ def mlil_instructions(self):
+ """A generator of mlil instructions"""
+ for block in self.mlil_basic_blocks:
+ for i in block:
+ yield i
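+
+ # Illustrative sketch (not part of the original module): consuming the
+ # generator properties above. '_example_print_instructions' is a hypothetical
+ # helper added here for documentation purposes only; it is not part of the
+ # BinaryView API.
+ def _example_print_instructions(self):
+     for tokens, addr in self.instructions:
+         # each item is a (token list, start address) pair as yielded above
+         print("%#x  %s" % (addr, "".join(token.text for token in tokens)))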
+
+ def __del__(self):
+ for i in self.notifications.values():
+ i._unregister()
+ if self._must_free:
+ core.BNFreeBinaryView(self.handle)
+
+ def __iter__(self):
+ count = ctypes.c_ulonglong(0)
+ funcs = core.BNGetAnalysisFunctionList(self.handle, count)
+ try:
+ for i in range(0, count.value):
+ yield binaryninja.function.Function(self, core.BNNewFunctionReference(funcs[i]))
+ finally:
+ core.BNFreeFunctionList(funcs, count.value)
+
+ @property
+ def parent_view(self):
+ """View that contains the raw data used by this view (read-only)"""
+ result = core.BNGetParentView(self.handle)
+ if result is None:
+ return None
+ return BinaryView(handle=result)
+
+ @property
+ def modified(self):
+ """boolean modification state of the BinaryView (read/write)"""
+ return self.file.modified
+
+ @modified.setter
+ def modified(self, value):
+ self.file.modified = value
+
+ @property
+ def analysis_changed(self):
+ """boolean analysis state changed of the currently running analysis (read-only)"""
+ return self.file.analysis_changed
+
+ @property
+ def has_database(self):
+ """boolean has a database been written to disk (read-only)"""
+ return self.file.has_database
+
+ @property
+ def view(self):
+ return self.file.view
+
+ @view.setter
+ def view(self, value):
+ self.file.view = value
+
+ @property
+ def offset(self):
+ return self.file.offset
+
+ @offset.setter
+ def offset(self, value):
+ self.file.offset = value
+
+ @property
+ def start(self):
+ """Start offset of the binary (read-only)"""
+ return core.BNGetStartOffset(self.handle)
+
+ @property
+ def end(self):
+ """End offset of the binary (read-only)"""
+ return core.BNGetEndOffset(self.handle)
+
+ @property
+ def entry_point(self):
+ """Entry point of the binary (read-only)"""
+ return core.BNGetEntryPoint(self.handle)
+
+ @property
+ def arch(self):
+ """The architecture associated with the current BinaryView (read/write)"""
+ arch = core.BNGetDefaultArchitecture(self.handle)
+ if arch is None:
+ return None
+ return binaryninja.architecture.CoreArchitecture._from_cache(handle=arch)
+
+ @arch.setter
+ def arch(self, value):
+ if value is None:
+ core.BNSetDefaultArchitecture(self.handle, None)
+ else:
+ core.BNSetDefaultArchitecture(self.handle, value.handle)
+
+ @property
+ def platform(self):
+ """The platform associated with the current BinaryView (read/write)"""
+ plat = core.BNGetDefaultPlatform(self.handle)
+ if plat is None:
+ return None
+ return binaryninja.platform.Platform(self.arch, handle=plat)
+
+ @platform.setter
+ def platform(self, value):
+ if value is None:
+ core.BNSetDefaultPlatform(self.handle, None)
+ else:
+ core.BNSetDefaultPlatform(self.handle, value.handle)
+
+ @property
+ def endianness(self):
+ """Endianness of the binary (read-only)"""
+ return Endianness(core.BNGetDefaultEndianness(self.handle))
+
+ @property
+ def relocatable(self):
+ """Boolean - is the binary relocatable (read-only)"""
+ return core.BNIsRelocatable(self.handle)
+
+ @property
+ def address_size(self):
+ """Address size of the binary (read-only)"""
+ return core.BNGetViewAddressSize(self.handle)
+
+ @property
+ def executable(self):
+ """Whether the binary is an executable (read-only)"""
+ return core.BNIsExecutableView(self.handle)
+
+ @property
+ def functions(self):
+ """List of functions (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ funcs = core.BNGetAnalysisFunctionList(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(binaryninja.function.Function(self, core.BNNewFunctionReference(funcs[i])))
+ core.BNFreeFunctionList(funcs, count.value)
+ return result
+
+ @property
+ def has_functions(self):
+ """Boolean whether the binary has functions (read-only)"""
+ return core.BNHasFunctions(self.handle)
+
+ @property
+ def entry_function(self):
+ """Entry function (read-only)"""
+ func = core.BNGetAnalysisEntryPoint(self.handle)
+ if func is None:
+ return None
+ return binaryninja.function.Function(self, func)
+
+ @property
+ def symbols(self):
+ """Dict of symbols (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ syms = core.BNGetSymbols(self.handle, count, None)
+ result = {}
+ for i in range(0, count.value):
+ sym = types.Symbol(None, None, None, handle=core.BNNewSymbolReference(syms[i]))
+ if sym.raw_name in result:
+     if isinstance(result[sym.raw_name], list):
+         result[sym.raw_name].append(sym)
+     else:
+         result[sym.raw_name] = [result[sym.raw_name], sym]
+ else:
+     result[sym.raw_name] = sym
+ core.BNFreeSymbolList(syms, count.value)
+ return result
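+
+ # Illustrative sketch (not part of the original module): looking up symbols by
+ # raw name. Because several symbols may share a name, the stored value is
+ # either a single Symbol or a list of Symbols. '_example_find_symbols' is a
+ # hypothetical helper; it is not part of the BinaryView API.
+ def _example_find_symbols(self, raw_name):
+     entry = self.symbols.get(raw_name)
+     if entry is None:
+         return []
+     return entry if isinstance(entry, list) else [entry]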
+
+[docs] @classmethod
+ def internal_namespace(self):
+ """Internal namespace for the current BinaryView"""
+ ns = core.BNGetInternalNameSpace()
+ result = types.NameSpace._from_core_struct(ns)
+ core.BNFreeNameSpace(ns)
+ return result
+
+[docs] @classmethod
+ def external_namespace(self):
+ """External namespace for the current BinaryView"""
+ ns = core.BNGetExternalNameSpace()
+ result = types.NameSpace._from_core_struct(ns)
+ core.BNFreeNameSpace(ns)
+ return result
+
+ @property
+ def namespaces(self):
+ """Returns a list of namespaces for the current BinaryView"""
+ count = ctypes.c_ulonglong(0)
+ nameSpaceList = core.BNGetNameSpaces(self.handle, count)
+ result = []
+ for i in range(count.value):
+ result.append(types.NameSpace._from_core_struct(nameSpaceList[i]))
+ core.BNFreeNameSpaceList(nameSpaceList, count.value)
+ return result
+
+ @property
+ def view_type(self):
+ """View type (read-only)"""
+ return core.BNGetViewType(self.handle)
+
+ @property
+ def available_view_types(self):
+ """Available view types (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ types = core.BNGetBinaryViewTypesForData(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(BinaryViewType(types[i]))
+ core.BNFreeBinaryViewTypeList(types)
+ return result
+
+ @property
+ def strings(self):
+ """List of strings (read-only)"""
+ return self.get_strings()
+
+ @property
+ def saved(self):
+ """boolean state of whether or not the file has been saved (read/write)"""
+ return self.file.saved
+
+ @saved.setter
+ def saved(self, value):
+ self.file.saved = value
+
+ @property
+ def analysis_info(self):
+ """Provides instantaneous analysis state information and a list of current functions under analysis (read-only).
+ All times are given in units of milliseconds (ms). Per function `analysis_time` is the aggregation of time spent
+ performing incremental updates and is reset on a full function update. Per function `update_count` tracks the
+ current number of incremental updates and is reset on a full function update. Per function `submit_count` tracks the
+ current number of full updates that have completed. Note that the `submit_count` is currently not reset across analysis updates."""
+ info_ref = core.BNGetAnalysisInfo(self.handle)
+ info = info_ref[0]
+ active_info_list = []
+ for i in range(0, info.count):
+ func = binaryninja.function.Function(self, core.BNNewFunctionReference(info.activeInfo[i].func))
+ active_info = ActiveAnalysisInfo(func, info.activeInfo[i].analysisTime, info.activeInfo[i].updateCount, info.activeInfo[i].submitCount)
+ active_info_list.append(active_info)
+ result = AnalysisInfo(info.state, info.analysisTime, active_info_list)
+ core.BNFreeAnalysisInfo(info_ref)
+ return result
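+
+ # Illustrative sketch (not part of the original module): reporting which
+ # functions are currently taking the longest to analyze, using the
+ # AnalysisInfo and ActiveAnalysisInfo objects above. '_example_report_analysis'
+ # is a hypothetical helper; it is not part of the BinaryView API.
+ def _example_report_analysis(self):
+     info = self.analysis_info
+     for active in sorted(info.active_info, key=lambda a: a.analysis_time, reverse=True):
+         print("%s: %d ms over %d incremental updates" % (active.func, active.analysis_time, active.update_count))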
+
+ @property
+ def analysis_progress(self):
+ """Status of current analysis (read-only)"""
+ result = core.BNGetAnalysisProgress(self.handle)
+ return AnalysisProgress(result.state, result.count, result.total)
+
+ @property
+ def linear_disassembly(self):
+ """Iterator for all lines in the linear disassembly of the view"""
+ return self.get_linear_disassembly(None)
+
+ @property
+ def data_vars(self):
+ """List of data variables (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ var_list = core.BNGetDataVariables(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ addr = var_list[i].address
+ var_type = types.Type(core.BNNewTypeReference(var_list[i].type), platform = self.platform, confidence = var_list[i].typeConfidence)
+ auto_discovered = var_list[i].autoDiscovered
+ result[addr] = DataVariable(addr, var_type, auto_discovered, self)
+ core.BNFreeDataVariables(var_list, count.value)
+ return result
+
+ @property
+ def types(self):
+ """List of defined types (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ type_list = core.BNGetAnalysisTypeList(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ name = types.QualifiedName._from_core_struct(type_list[i].name)
+ result[name] = types.Type(core.BNNewTypeReference(type_list[i].type), platform = self.platform)
+ core.BNFreeTypeList(type_list, count.value)
+ return result
+
+ @property
+ def segments(self):
+ """List of segments (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ segment_list = core.BNGetSegments(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(Segment(core.BNNewSegmentReference(segment_list[i])))
+ core.BNFreeSegmentList(segment_list, count.value)
+ return result
+
+ @property
+ def sections(self):
+ """List of sections (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ section_list = core.BNGetSections(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ result[core.BNSectionGetName(section_list[i])] = Section(core.BNNewSectionReference(section_list[i]))
+ core.BNFreeSectionList(section_list, count.value)
+ return result
+
+ @property
+ def allocated_ranges(self):
+ """List of valid address ranges for this view (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ range_list = core.BNGetAllocatedRanges(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(AddressRange(range_list[i].start, range_list[i].end))
+ core.BNFreeAddressRanges(range_list)
+ return result
+
+ @property
+ def session_data(self):
+ """Dictionary object where plugins can store arbitrary data associated with the view"""
+ handle = ctypes.cast(self.handle, ctypes.c_void_p)
+ if handle.value not in BinaryView._associated_data:
+ obj = _BinaryViewAssociatedDataStore()
+ BinaryView._associated_data[handle.value] = obj
+ return obj
+ else:
+ return BinaryView._associated_data[handle.value]
+
+ @property
+ def global_pointer_value(self):
+ """Discovered value of the global pointer register, if the binary uses one (read-only)"""
+ result = core.BNGetGlobalPointerValue(self.handle)
+ return binaryninja.function.RegisterValue(self.arch, result.value, confidence = result.confidence)
+
+ @property
+ def parameters_for_analysis(self):
+ return core.BNGetParametersForAnalysis(self.handle)
+
+ @parameters_for_analysis.setter
+ def parameters_for_analysis(self, params):
+ core.BNSetParametersForAnalysis(self.handle, params)
+
+ @property
+ def max_function_size_for_analysis(self):
+ """Maximum size of function (sum of basic block sizes in bytes) for auto analysis"""
+ return core.BNGetMaxFunctionSizeForAnalysis(self.handle)
+
+ @max_function_size_for_analysis.setter
+ def max_function_size_for_analysis(self, size):
+ core.BNSetMaxFunctionSizeForAnalysis(self.handle, size)
+
+ @property
+ def relocation_ranges(self):
+ """List of relocation range tuples (read-only)"""
+
+ count = ctypes.c_ulonglong()
+ ranges = core.BNGetRelocationRanges(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append((ranges[i].start, ranges[i].end))
+ core.BNFreeRelocationRanges(ranges, count)
+ return result
+
+[docs] def relocation_ranges_at(self, addr):
+ """List of relocation range tuples for a given address"""
+
+ count = ctypes.c_ulonglong()
+ ranges = core.BNGetRelocationRangesAtAddress(self.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append((ranges[i].start, ranges[i].end))
+ core.BNFreeRelocationRanges(ranges, count)
+ return result
+
+ @property
+ def new_auto_function_analysis_suppressed(self):
+ """Whether or not automatically discovered functions will be analyzed"""
+ return core.BNGetNewAutoFunctionAnalysisSuppressed(self.handle)
+
+ @new_auto_function_analysis_suppressed.setter
+ def new_auto_function_analysis_suppressed(self, suppress):
+ core.BNSetNewAutoFunctionAnalysisSuppressed(self.handle, suppress)
+
+ def __len__(self):
+ return int(core.BNGetViewLength(self.handle))
+
+ def __getitem__(self, i):
+ if isinstance(i, tuple):
+ result = ""
+ for s in i:
+ result += self.__getitem__(s)
+ return result
+ elif isinstance(i, slice):
+ if i.step is not None:
+ raise IndexError("step not implemented")
+ i = i.indices(self.end)
+ start = i[0]
+ stop = i[1]
+ if stop <= start:
+ return ""
+ return str(self.read(start, stop - start))
+ elif i < 0:
+ if i >= -len(self):
+ value = str(self.read(int(len(self) + i), 1))
+ if len(value) == 0:
+ raise IndexError("index not readable")
+ return value
+ raise IndexError("index out of range")
+ elif (i >= self.start) and (i < self.end):
+ value = str(self.read(int(i), 1))
+ if len(value) == 0:
+ raise IndexError("index not readable")
+ return value
+ else:
+ raise IndexError("index out of range")
+
+ def __setitem__(self, i, value):
+ if isinstance(i, slice):
+ if i.step is not None:
+ raise IndexError("step not supported on assignment")
+ i = i.indices(self.end)
+ start = i[0]
+ stop = i[1]
+ if stop < start:
+ stop = start
+ if len(value) != (stop - start):
+ self.remove(start, stop - start)
+ self.insert(start, value)
+ else:
+ self.write(start, value)
+ elif i < 0:
+ if i >= -len(self):
+ if len(value) != 1:
+ raise ValueError("expected single byte for assignment")
+ if self.write(int(len(self) + i), value) != 1:
+ raise IndexError("index not writable")
+ else:
+ raise IndexError("index out of range")
+ elif (i >= self.start) and (i < self.end):
+ if len(value) != 1:
+ raise ValueError("expected single byte for assignment")
+ if self.write(int(i), value) != 1:
+ raise IndexError("index not writable")
+ else:
+ raise IndexError("index out of range")
+
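+ # A minimal usage sketch of the index and slice access implemented above,
+ # assuming ``bv`` is an open BinaryView: slices read data and single-index
+ # assignment patches one byte through ``write``.
+ #
+ #   magic = bv[bv.start : bv.start + 4]   # equivalent to str(bv.read(bv.start, 4))
+ #   bv[bv.start] = b"\x90"                # patch a single byte
+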
+ def __repr__(self):
+ start = self.start
+ length = len(self)
+ if start != 0:
+ size = "start %#x, len %#x" % (start, length)
+ else:
+ size = "len %#x" % length
+ filename = self.file.filename
+ if len(filename) > 0:
+ return "<BinaryView: '%s', %s>" % (filename, size)
+ return "<BinaryView: %s>" % (size)
+
+ def _init(self, ctxt):
+ try:
+ return self.init()
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _free_object(self, ctxt):
+ try:
+ self.__class__._registered_instances.remove(self)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _read(self, ctxt, dest, offset, length):
+ try:
+ data = self.perform_read(offset, length)
+ if data is None:
+ return 0
+ if len(data) > length:
+ data = data[0:length]
+ ctypes.memmove(dest, str(data), len(data))
+ return len(data)
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _write(self, ctxt, offset, src, length):
+ try:
+ data = ctypes.create_string_buffer(length)
+ ctypes.memmove(data, src, length)
+ return self.perform_write(offset, data.raw)
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _insert(self, ctxt, offset, src, length):
+ try:
+ data = ctypes.create_string_buffer(length)
+ ctypes.memmove(data, src, length)
+ return self.perform_insert(offset, data.raw)
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _remove(self, ctxt, offset, length):
+ try:
+ return self.perform_remove(offset, length)
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_modification(self, ctxt, offset):
+ try:
+ return self.perform_get_modification(offset)
+ except:
+ log.log_error(traceback.format_exc())
+ return ModificationStatus.Original
+
+ def _is_valid_offset(self, ctxt, offset):
+ try:
+ return self.perform_is_valid_offset(offset)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_offset_readable(self, ctxt, offset):
+ try:
+ return self.perform_is_offset_readable(offset)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_offset_writable(self, ctxt, offset):
+ try:
+ return self.perform_is_offset_writable(offset)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _is_offset_executable(self, ctxt, offset):
+ try:
+ return self.perform_is_offset_executable(offset)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_next_valid_offset(self, ctxt, offset):
+ try:
+ return self.perform_get_next_valid_offset(offset)
+ except:
+ log.log_error(traceback.format_exc())
+ return offset
+
+ def _get_start(self, ctxt):
+ try:
+ return self.perform_get_start()
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_length(self, ctxt):
+ try:
+ return self.perform_get_length()
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _get_entry_point(self, ctxt):
+ try:
+ return self.perform_get_entry_point()
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _is_executable(self, ctxt):
+ try:
+ return self.perform_is_executable()
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_default_endianness(self, ctxt):
+ try:
+ return self.perform_get_default_endianness()
+ except:
+ log.log_error(traceback.format_exc())
+ return Endianness.LittleEndian
+
+ def _is_relocatable(self, ctxt):
+ try:
+ return self.perform_is_relocatable()
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_address_size(self, ctxt):
+ try:
+ return self.perform_get_address_size()
+ except:
+ log.log_error(traceback.format_exc())
+ return 8
+
+ def _save(self, ctxt, file_accessor):
+ try:
+ return self.perform_save(fileaccessor.CoreFileAccessor(file_accessor))
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+
+
+[docs] def get_disassembly(self, addr, arch=None):
+ """
+ ``get_disassembly`` is a simple helper function for retrieving the disassembly text at a given address
+
+ :param int addr: virtual address of instruction
+ :param Architecture arch: optional Architecture, ``self.arch`` is used if this parameter is None
+ :return: a str representation of the instruction at virtual address ``addr`` or None
+ :rtype: str or None
+ :Example:
+
+ >>> bv.get_disassembly(bv.entry_point)
+ 'push ebp'
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ txt, size = arch.get_instruction_text(self.read(addr, arch.max_instr_length), addr)
+ self.next_address = addr + size
+ if txt is None:
+ return None
+ return ''.join(str(a) for a in txt).strip()
+
+[docs] def get_next_disassembly(self, arch=None):
+ """
+ ``get_next_disassembly`` is a simple helper function for retrieving the disassembly text of the next instruction.
+ The address of the next instruction to be disassembled is stored in the ``next_address`` attribute.
+
+ :param Architecture arch: optional Architecture, ``self.arch`` is used if this parameter is None
+ :return: a str representation of the instruction at virtual address ``self.next_address``
+ :rtype: str or None
+ :Example:
+
+ >>> bv.get_next_disassembly()
+ 'push ebp'
+ >>> bv.get_next_disassembly()
+ 'mov ebp, esp'
+ >>> #Now reset the starting point back to the entry point
+ >>> bv.next_address = bv.entry_point
+ >>> bv.get_next_disassembly()
+ 'push ebp'
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ if self.next_address is None:
+ self.next_address = self.entry_point
+ txt, size = arch.get_instruction_text(self.read(self.next_address, arch.max_instr_length), self.next_address)
+ self.next_address += size
+ if txt is None:
+ return None
+ return ''.join(str(a) for a in txt).strip()
+
+[docs] def perform_save(self, accessor):
+ if self.parent_view is not None:
+ return self.parent_view.save(accessor)
+ return False
+
+
+
+[docs] def perform_get_length(self):
+ """
+ ``perform_get_length`` implements a query for the size of the virtual address range used by
+ the BinaryView.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :return: returns the size of the virtual address range used by the BinaryView.
+ :rtype: int
+ """
+ return 0
+
+[docs] def perform_read(self, addr, length):
+ """
+ ``perform_read`` implements a mapping between a virtual address and an absolute file offset, reading
+ ``length`` bytes from the rebased address ``addr``.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address to attempt to read from
+ :param int length: the number of bytes to be read
+ :return: length bytes read from addr, should return empty string on error
+ :rtype: str
+ """
+ return ""
+
+[docs] def perform_write(self, addr, data):
+ """
+ ``perform_write`` implements a mapping between a virtual address and an absolute file offset, writing
+ the bytes ``data`` to rebased address ``addr``.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address
+ :param str data: the data to be written
+ :return: length of data written, should return 0 on error
+ :rtype: int
+ """
+ return 0
+
+[docs] def perform_insert(self, addr, data):
+ """
+ ``perform_insert`` implements a mapping between a virtual address and an absolute file offset, inserting
+ the bytes ``data`` to rebased address ``addr``.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. If not overridden, inserting is disallowed
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address
+ :param str data: the data to be inserted
+ :return: length of data inserted, should return 0 on error
+ :rtype: int
+ """
+ return 0
+
+[docs] def perform_remove(self, addr, length):
+ """
+ ``perform_remove`` implements a mapping between a virtual address and an absolute file offset, removing
+ ``length`` bytes from the rebased address ``addr``.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. If not overridden, removing data is disallowed
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address
+ :param int length: the number of bytes to be removed
+ :return: length of data removed, should return 0 on error
+ :rtype: int
+ """
+ return 0
+
+[docs] def perform_get_modification(self, addr):
+ """
+ ``perform_get_modification`` implements a query as to whether the virtual address ``addr`` has been modified.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address to be checked
+ :return: One of the following: Original = 0, Changed = 1, Inserted = 2
+ :rtype: ModificationStatus
+ """
+ return ModificationStatus.Original
+
+[docs] def perform_is_valid_offset(self, addr):
+ """
+ ``perform_is_valid_offset`` implements a check if a virtual address ``addr`` is valid.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is valid, false if the virtual address is invalid or error
+ :rtype: bool
+ """
+ data = self.read(addr, 1)
+ return (data is not None) and (len(data) == 1)
+
+[docs] def perform_is_offset_readable(self, offset):
+ """
+ ``perform_is_offset_readable`` implements a check if a virtual address is readable.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int offset: a virtual address to be checked
+ :return: true if the virtual address is readable, false if the virtual address is not readable or error
+ :rtype: bool
+ """
+ return self.is_valid_offset(offset)
+
+[docs] def perform_is_offset_writable(self, addr):
+ """
+ ``perform_is_offset_writable`` implements a check if a virtual address ``addr`` is writable.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is writable, false if the virtual address is not writable or error
+ :rtype: bool
+ """
+ return self.is_valid_offset(addr)
+
+[docs] def perform_is_offset_executable(self, addr):
+ """
+ ``perform_is_offset_executable`` implements a check if a virtual address ``addr`` is executable.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is executable, false if the virtual address is not executable or error
+ :rtype: bool
+ """
+ return self.is_valid_offset(addr)
+
+[docs] def perform_get_next_valid_offset(self, addr):
+ """
+ ``perform_get_next_valid_offset`` implements a query for the next valid readable, writable, or executable virtual
+ memory address.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :param int addr: a virtual address to start checking from.
+ :return: the next readable, writable, or executable virtual memory address
+ :rtype: int
+ """
+ if addr < self.perform_get_start():
+ return self.perform_get_start()
+ return addr
+
+[docs] def perform_get_start(self):
+ """
+ ``perform_get_start`` implements a query for the first readable, writable, or executable virtual address in
+ the BinaryView.
+
+ .. note:: This method **may** be overridden by custom BinaryViews. Use ``add_auto_segment`` to provide
+ data without overriding this method.
+ .. warning:: This method **must not** be called directly.
+
+ :return: returns the first virtual address in the BinaryView.
+ :rtype: int
+ """
+ return 0
+
+[docs] def perform_get_entry_point(self):
+ """
+ ``perform_get_entry_point`` implements a query for the initial entry point for code execution.
+
+ .. note:: This method **should** be implemented for custom BinaryViews that are executable.
+ .. warning:: This method **must not** be called directly.
+
+ :return: the virtual address of the entry point
+ :rtype: int
+ """
+ return 0
+
+[docs] def perform_is_executable(self):
+ """
+ ``perform_is_executable`` implements a check which returns true if the BinaryView is executable.
+
+ .. note:: This method **must** be implemented for custom BinaryViews that are executable.
+ .. warning:: This method **must not** be called directly.
+
+ :return: true if the current BinaryView is executable, false if it is not executable or on error
+ :rtype: bool
+ """
+ return False
+
+[docs] def perform_get_default_endianness(self):
+ """
+ ``perform_get_default_endianness`` implements a query for the default Endianness of the BinaryView.
+
+ .. note:: This method **may** be implemented for custom BinaryViews that are not LittleEndian.
+ .. warning:: This method **must not** be called directly.
+
+ :return: either ``Endianness.LittleEndian`` or ``Endianness.BigEndian``
+ :rtype: Endianness
+ """
+ return Endianness.LittleEndian
+
+[docs] def perform_is_relocatable(self):
+ """
+ ``perform_is_relocatable`` implements a check which returns true if the BinaryView is relocatable. Defaults to
+ True.
+
+ .. note:: This method **may** be implemented for custom BinaryViews that are relocatable.
+ .. warning:: This method **must not** be called directly.
+
+ :return: True if the BinaryView is relocatable, False otherwise
+ :rtype: boolean
+ """
+ return True
+
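+ # A minimal sketch of a custom BinaryView built on the ``perform_*`` hooks
+ # above. The registration pattern (``name``/``long_name`` attributes,
+ # ``is_valid_for_data`` and ``register()``) and the "MYFM" magic are
+ # assumptions for illustration, not taken from this file.
+ #
+ #   class MyFormatView(BinaryView):
+ #       name = "MyFormat"
+ #       long_name = "My Example Format"
+ #
+ #       def __init__(self, data):
+ #           BinaryView.__init__(self, parent_view=data, file_metadata=data.file)
+ #           self.data = data
+ #
+ #       @classmethod
+ #       def is_valid_for_data(cls, data):
+ #           return data.read(0, 4) == b"MYFM"
+ #
+ #       def perform_get_start(self):
+ #           return 0x10000
+ #
+ #       def perform_get_length(self):
+ #           return len(self.data) - 4
+ #
+ #       def perform_read(self, addr, length):
+ #           return self.data.read(addr - 0x10000 + 4, length)
+ #
+ #       def perform_is_executable(self):
+ #           return True
+ #
+ #   MyFormatView.register()
+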
+[docs] def create_database(self, filename, progress_func=None):
+ """
+ ``create_database`` writes the current database (.bndb) file out to the specified file.
+
+ :param str filename: path and filename to write the bndb to, this string `should` have ".bndb" appended to it.
+ :param callable() progress_func: optional function to be called with the current progress and total count.
+ :return: true on success, false on failure
+ :rtype: bool
+ """
+ return self.file.create_database(filename, progress_func)
+
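+ # A minimal sketch of ``create_database`` with a progress callback, following
+ # the docstring above: the callback takes the current progress and total count;
+ # returning True to continue is an assumption here.
+ #
+ #   def report(cur, total):
+ #       print("saving {} / {}".format(cur, total))
+ #       return True
+ #
+ #   bv.create_database("/tmp/example.bndb", report)
+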
+[docs] def save_auto_snapshot(self, progress_func=None):
+ """
+ ``save_auto_snapshot`` saves the current database to the already created file.
+
+ .. note:: :py:meth:`create_database` should have been called prior to executing this method
+
+ :param callable() progress_func: optional function to be called with the current progress and total count.
+ :return: True if it successfully saved the snapshot, False otherwise
+ :rtype: bool
+ """
+ return self.file.save_auto_snapshot(progress_func)
+
+[docs] def get_view_of_type(self, name):
+ """
+ ``get_view_of_type`` returns the BinaryView associated with the provided name if it exists.
+
+ :param str name: Name of the view to be retrieved
+ :return: BinaryView object associated with the provided name or None on failure
+ :rtype: BinaryView or None
+ """
+ return self.file.get_view_of_type(name)
+
+[docs] def begin_undo_actions(self):
+ """
+ ``begin_undo_actions`` starts recording actions taken so they can be undone at some point.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>>
+ """
+ self.file.begin_undo_actions()
+
+[docs] def add_undo_action(self, action):
+ core.BNAddUndoAction(self.handle, action.__class__.name, action._cb)
+
+[docs] def commit_undo_actions(self):
+ """
+ ``commit_undo_actions`` commit the actions taken since the last commit to the undo database.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>>
+ """
+ self.file.commit_undo_actions()
+
+[docs] def undo(self):
+ """
+ ``undo`` undo the last committed action in the undo database.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.redo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>>
+ """
+ self.file.undo()
+
+[docs] def redo(self):
+ """
+ ``redo`` redo the last committed action in the undo database.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.redo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>>
+ """
+ self.file.redo()
+
+
+
+[docs] def read(self, addr, length):
+ """
+ ``read`` reads at most ``length`` bytes from virtual address ``addr`` and returns the data.
+
+ Note: Python2 returns a str, but Python3 returns a bytes object. str(DataBufferObject) will
+ still get you a str in either case.
+
+ :param int addr: virtual address to read from.
+ :param int length: number of bytes to read.
+ :return: at most ``length`` bytes from the virtual address ``addr``, empty string on error or no data.
+ :rtype: python2 - str; python3 - bytes
+ :Example:
+
+ >>> #Opening a x86_64 Mach-O binary
+ >>> bv = BinaryViewType['Raw'].open("/bin/ls")
+ >>> bv.read(0,4)
+ \'\\xcf\\xfa\\xed\\xfe\'
+ """
+ if (addr < 0) or (length < 0):
+ raise ValueError("length and address must both be positive")
+ buf = databuffer.DataBuffer(handle=core.BNReadViewBuffer(self.handle, addr, length))
+ return bytes(buf)
+
+[docs] def write(self, addr, data):
+ """
+ ``write`` writes the bytes in ``data`` to the virtual address ``addr``.
+
+ :param int addr: virtual address to write to.
+ :param str data: data to be written at addr.
+ :return: number of bytes written to virtual address ``addr``
+ :rtype: int
+ :Example:
+
+ >>> bv.read(0,4)
+ 'BBBB'
+ >>> bv.write(0, "AAAA")
+ 4L
+ >>> bv.read(0,4)
+ 'AAAA'
+ """
+ if not isinstance(data, bytes):
+ if isinstance(data, str):
+ buf = databuffer.DataBuffer(data.encode())
+ else:
+ raise TypeError("Must be bytes or str")
+ else:
+ buf = databuffer.DataBuffer(data)
+ return core.BNWriteViewBuffer(self.handle, addr, buf.handle)
+
+[docs] def insert(self, addr, data):
+ """
+ ``insert`` inserts the bytes in ``data`` to the virtual address ``addr``.
+
+ :param int addr: virtual address to write to.
+ :param str data: data to be inserted at addr.
+ :return: number of bytes inserted to virtual address ``addr``
+ :rtype: int
+ :Example:
+
+ >>> bv.insert(0,"BBBB")
+ 4L
+ >>> bv.read(0,8)
+ 'BBBBAAAA'
+ """
+ if not isinstance(data, bytes):
+ raise TypeError("Must be bytes")
+ buf = databuffer.DataBuffer(data)
+ return core.BNInsertViewBuffer(self.handle, addr, buf.handle)
+
+[docs] def remove(self, addr, length):
+ """
+ ``remove`` removes at most ``length`` bytes from virtual address ``addr``.
+
+ :param int addr: virtual address to remove from.
+ :param int length: number of bytes to remove.
+ :return: number of bytes removed from virtual address ``addr``
+ :rtype: int
+ :Example:
+
+ >>> bv.read(0,8)
+ 'BBBBAAAA'
+ >>> bv.remove(0,4)
+ 4L
+ >>> bv.read(0,4)
+ 'AAAA'
+ """
+ return core.BNRemoveViewData(self.handle, addr, length)
+
+[docs] def get_entropy(self, addr, length, block_size=0):
+ """
+ ``get_entropy`` returns the Shannon entropy given the start ``addr``, ``length`` in bytes, and optionally in
+ ``block_size`` chunks.
+
+ :param int addr: virtual address
+ :param int length: total length in bytes
+ :param int block_size: optional block size
+ :return: list of entropy values for each chunk
+ :rtype: list of entropy values or an empty list
+ """
+ result = []
+ if length == 0:
+ return result
+ if block_size == 0:
+ block_size = length
+ data = (ctypes.c_float * ((length // block_size) + 1))()
+ length = core.BNGetEntropy(self.handle, addr, length, block_size, data)
+
+ for i in range(0, length):
+ result.append(float(data[i]))
+ return result
+
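+ # A minimal usage sketch of ``get_entropy``, assuming ``bv`` is an open
+ # BinaryView: split the first 0x1000 bytes into 256-byte blocks and flag the
+ # high-entropy ones (the 0.9 threshold is only an example).
+ #
+ #   for i, e in enumerate(bv.get_entropy(bv.start, 0x1000, 256)):
+ #       if e > 0.9:
+ #           print("high entropy block at {:#x}".format(bv.start + i * 256))
+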
+[docs] def get_modification(self, addr, length=None):
+ """
+ ``get_modification`` returns the modified bytes of up to ``length`` bytes from virtual address ``addr``, or if
+ ``length`` is None returns the ModificationStatus.
+
+ :param int addr: virtual address to get modification from
+ :param int length: optional length of modification
+ :return: Either ModificationStatus of the byte at ``addr``, or string of modified bytes at ``addr``
+ :rtype: ModificationStatus or str
+ """
+ if length is None:
+ return ModificationStatus(core.BNGetModification(self.handle, addr))
+ data = (ModificationStatus * length)()
+ length = core.BNGetModificationArray(self.handle, addr, data, length)
+ return data[0:length]
+
+[docs] def is_valid_offset(self, addr):
+ """
+ ``is_valid_offset`` checks if a virtual address ``addr`` is valid.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is valid, false if the virtual address is invalid or error
+ :rtype: bool
+ """
+ return core.BNIsValidOffset(self.handle, addr)
+
+[docs] def is_offset_readable(self, addr):
+ """
+ ``is_offset_readable`` checks if a virtual address ``addr`` is valid for reading.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is valid for reading, false if the virtual address is invalid or error
+ :rtype: bool
+ """
+ return core.BNIsOffsetReadable(self.handle, addr)
+
+[docs] def is_offset_writable(self, addr):
+ """
+ ``is_offset_writable`` checks if a virtual address ``addr`` is valid for writing.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is valid for writing, false if the virtual address is invalid or error
+ :rtype: bool
+ """
+ return core.BNIsOffsetWritable(self.handle, addr)
+
+[docs] def is_offset_executable(self, addr):
+ """
+ ``is_offset_executable`` checks if a virtual address ``addr`` is valid for executing.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is valid for executing, false if the virtual address is invalid or error
+ :rtype: bool
+ """
+ return core.BNIsOffsetExecutable(self.handle, addr)
+
+[docs] def is_offset_code_semantics(self, addr):
+ """
+ ``is_offset_code_semantics`` checks if a virtual address ``addr`` is semantically valid for code.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is semantically valid for code, false if it is not or on error
+ :rtype: bool
+ """
+ return core.BNIsOffsetCodeSemantics(self.handle, addr)
+
+[docs] def is_offset_extern_semantics(self, addr):
+ """
+ ``is_offset_extern_semantics`` checks if a virtual address ``addr`` is semantically valid for external references.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is semantically valid for external references, false if it is not or on error
+ :rtype: bool
+ """
+ return core.BNIsOffsetExternSemantics(self.handle, addr)
+
+[docs] def is_offset_writable_semantics(self, addr):
+ """
+ ``is_offset_writable_semantics`` checks if a virtual address ``addr`` is semantically writable. Some sections
+ may have writable permissions for linking purposes but can be treated as read-only for the purposes of
+ analysis.
+
+ :param int addr: a virtual address to be checked
+ :return: true if the virtual address is semantically writable, false if it is not or on error
+ :rtype: bool
+ """
+ return core.BNIsOffsetWritableSemantics(self.handle, addr)
+
+[docs] def save(self, dest):
+ """
+ ``save`` saves the original binary file to the provided destination ``dest`` along with any modifications.
+
+ :param str dest: destination path and filename of file to be written
+ :return: boolean True on success, False on failure
+ :rtype: bool
+ """
+ if isinstance(dest, fileaccessor.FileAccessor):
+ return core.BNSaveToFile(self.handle, dest._cb)
+ return core.BNSaveToFilename(self.handle, str(dest))
+
+[docs] def register_notification(self, notify):
+ """
+ `register_notification` provides a mechanism for receiving callbacks for various analysis events. A full
+ list of callbacks can be seen in :py:Class:`BinaryDataNotification`.
+
+ :param BinaryDataNotification notify: notify is a subclassed instance of :py:Class:`BinaryDataNotification`.
+ :rtype: None
+ """
+ cb = BinaryDataNotificationCallbacks(self, notify)
+ cb._register()
+ self.notifications[notify] = cb
+
+[docs] def unregister_notification(self, notify):
+ """
+ `unregister_notification` unregisters the :py:Class:`BinaryDataNotification` object passed to
+ `register_notification`
+
+ :param BinaryDataNotification notify: notify is a subclassed instance of :py:Class:`BinaryDataNotification`.
+ :rtype: None
+ """
+ if notify in self.notifications:
+ self.notifications[notify]._unregister()
+ del self.notifications[notify]
+
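+ # A minimal sketch of the notification mechanism described above, assuming a
+ # BinaryDataNotification subclass; the ``function_added(view, func)`` callback
+ # name is taken to be one of the callbacks that class defines.
+ #
+ #   class MyNotify(BinaryDataNotification):
+ #       def function_added(self, view, func):
+ #           print("new function at {:#x}".format(func.start))
+ #
+ #   bv.register_notification(MyNotify())
+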
+[docs] def add_function(self, addr, plat=None):
+ """
+ ``add_function`` adds a new function of the given ``plat`` at the virtual address ``addr``
+
+ :param int addr: virtual address of the function to be added
+ :param Platform plat: Platform for the function to be added
+ :rtype: None
+ :Example:
+
+ >>> bv.add_function(1)
+ >>> bv.functions
+ [<func: x86_64@0x1>]
+
+ """
+ if self.platform is None:
+ raise Exception("Default platform not set in BinaryView")
+ if plat is None:
+ plat = self.platform
+ core.BNAddFunctionForAnalysis(self.handle, plat.handle, addr)
+
+[docs] def add_entry_point(self, addr, plat=None):
+ """
+ ``add_entry_point`` adds a virtual address to start analysis from for a given plat.
+
+ :param int addr: virtual address to start analysis from
+ :param Platform plat: Platform for the entry point analysis
+ :rtype: None
+ :Example:
+ >>> bv.add_entry_point(0xdeadbeef)
+ >>>
+ """
+ if self.platform is None:
+ raise Exception("Default platform not set in BinaryView")
+ if plat is None:
+ plat = self.platform
+ core.BNAddEntryPointForAnalysis(self.handle, plat.handle, addr)
+
+[docs] def remove_function(self, func):
+ """
+ ``remove_function`` removes the function ``func`` from the list of functions
+
+ :param Function func: a Function object.
+ :rtype: None
+ :Example:
+
+ >>> bv.functions
+ [<func: x86_64@0x1>]
+ >>> bv.remove_function(bv.functions[0])
+ >>> bv.functions
+ []
+ """
+ core.BNRemoveAnalysisFunction(self.handle, func.handle)
+
+[docs] def create_user_function(self, addr, plat=None):
+ """
+ ``create_user_function`` adds a new *user* function of the given ``plat`` at the virtual address ``addr``
+
+ :param int addr: virtual address of the *user* function to be added
+ :param Platform plat: Platform for the function to be added
+ :rtype: None
+ :Example:
+
+ >>> bv.create_user_function(1)
+ >>> bv.functions
+ [<func: x86_64@0x1>]
+
+ """
+ if plat is None:
+ plat = self.platform
+ core.BNCreateUserFunction(self.handle, plat.handle, addr)
+
+[docs] def remove_user_function(self, func):
+ """
+ ``remove_user_function`` removes the *user* function ``func`` from the list of functions
+
+ :param Function func: a Function object.
+ :rtype: None
+ :Example:
+
+ >>> bv.functions
+ [<func: x86_64@0x1>]
+ >>> bv.remove_user_function(bv.functions[0])
+ >>> bv.functions
+ []
+ """
+ core.BNRemoveUserFunction(self.handle, func.handle)
+
+[docs] def add_analysis_option(self, name):
+ """
+ ``add_analysis_option`` adds an analysis option. Analysis options change how the next analysis pass is
+ performed. The user must start analysis by calling either ``update_analysis()`` or ``update_analysis_and_wait()``.
+
+ :param str name: name of the analysis option. Available options:
+ "linearsweep" : apply linearsweep analysis during the next analysis update (run-once semantics)
+
+ :rtype: None
+ :Example:
+
+ >>> bv.add_analysis_option("linearsweep")
+ >>> bv.update_analysis_and_wait()
+ """
+ core.BNAddAnalysisOption(self.handle, name)
+
+[docs] def update_analysis(self):
+ """
+ ``update_analysis`` asynchronously starts the analysis running and returns immediately. Analysis of BinaryViews
+ does not occur automatically, the user must start analysis by calling either ``update_analysis()`` or
+ ``update_analysis_and_wait()``. An analysis update **must** be run after changes are made which could change
+ analysis results such as adding functions.
+
+ :rtype: None
+ """
+ core.BNUpdateAnalysis(self.handle)
+
+[docs] def update_analysis_and_wait(self):
+ """
+ ``update_analysis_and_wait`` blocking call to update the analysis, this call returns when the analysis is
+ complete. Analysis of BinaryViews does not occur automatically, the user must start analysis by calling either
+ ``update_analysis()`` or ``update_analysis_and_wait()``. An analysis update **must** be run after changes are
+ made which could change analysis results such as adding functions.
+
+ :rtype: None
+ """
+ core.BNUpdateAnalysisAndWait(self.handle)
+
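+ # A minimal workflow sketch tying the above together, assuming ``bv`` is an
+ # open BinaryView: make a change that affects analysis, then explicitly run
+ # the update and wait for it to finish before inspecting the results.
+ #
+ #   bv.add_function(bv.entry_point)
+ #   bv.update_analysis_and_wait()
+ #   print(len(bv.functions))
+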
+[docs] def abort_analysis(self):
+ """
+ ``abort_analysis`` will abort the currently running analysis.
+
+ :rtype: None
+ """
+ core.BNAbortAnalysis(self.handle)
+
+[docs] def define_data_var(self, addr, var_type):
+ """
+ ``define_data_var`` defines a non-user data variable ``var_type`` at the virtual address ``addr``.
+
+ :param int addr: virtual address to define the given data variable
+ :param Type var_type: type to be defined at the given virtual address
+ :rtype: None
+ :Example:
+
+ >>> t = bv.parse_type_string("int foo")
+ >>> t
+ (<type: int32_t>, 'foo')
+ >>> bv.define_data_var(bv.entry_point, t[0])
+ >>>
+ """
+ tc = core.BNTypeWithConfidence()
+ tc.type = var_type.handle
+ tc.confidence = var_type.confidence
+ core.BNDefineDataVariable(self.handle, addr, tc)
+
+[docs] def define_user_data_var(self, addr, var_type):
+ """
+ ``define_user_data_var`` defines a user data variable ``var_type`` at the virtual address ``addr``.
+
+ :param int addr: virtual address to define the given data variable
+ :param binaryninja.Type var_type: type to be defined at the given virtual address
+ :rtype: None
+ :Example:
+
+ >>> t = bv.parse_type_string("int foo")
+ >>> t
+ (<type: int32_t>, 'foo')
+ >>> bv.define_user_data_var(bv.entry_point, t[0])
+ >>>
+ """
+ tc = core.BNTypeWithConfidence()
+ tc.type = var_type.handle
+ tc.confidence = var_type.confidence
+ core.BNDefineUserDataVariable(self.handle, addr, tc)
+
+[docs] def undefine_data_var(self, addr):
+ """
+ ``undefine_data_var`` removes the non-user data variable at the virtual address ``addr``.
+
+ :param int addr: virtual address to define the data variable to be removed
+ :rtype: None
+ :Example:
+
+ >>> bv.undefine_data_var(bv.entry_point)
+ >>>
+ """
+ core.BNUndefineDataVariable(self.handle, addr)
+
+[docs] def undefine_user_data_var(self, addr):
+ """
+ ``undefine_user_data_var`` removes the user data variable at the virtual address ``addr``.
+
+ :param int addr: virtual address to define the data variable to be removed
+ :rtype: None
+ :Example:
+
+ >>> bv.undefine_user_data_var(bv.entry_point)
+ >>>
+ """
+ core.BNUndefineUserDataVariable(self.handle, addr)
+
+[docs] def get_data_var_at(self, addr):
+ """
+ ``get_data_var_at`` returns the data type at a given virtual address.
+
+ :param int addr: virtual address to get the data type from
+ :return: returns the DataVariable at the given virtual address, None on error.
+ :rtype: DataVariable
+ :Example:
+
+ >>> t = bv.parse_type_string("int foo")
+ >>> bv.define_data_var(bv.entry_point, t[0])
+ >>> bv.get_data_var_at(bv.entry_point)
+ <var 0x100001174: int32_t>
+
+ """
+ var = core.BNDataVariable()
+ if not core.BNGetDataVariableAtAddress(self.handle, addr, var):
+ return None
+ return DataVariable(var.address, types.Type(var.type, platform = self.platform, confidence = var.typeConfidence), var.autoDiscovered, self)
+
+[docs] def get_functions_containing(self, addr):
+ """
+ ``get_functions_containing`` returns a list of functions which contain the given address or None on failure.
+
+ :param int addr: virtual address to query.
+ :rtype: list of Function objects or None
+ """
+ basic_blocks = self.get_basic_blocks_at(addr)
+ if len(basic_blocks) == 0:
+ return None
+
+ result = []
+ for block in basic_blocks:
+ result.append(block.function)
+ return result
+
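+ # A minimal usage sketch of ``get_functions_containing``, assuming ``addr`` is
+ # an address inside analyzed code and the usual ``name`` accessor on Function:
+ # the result is either None or a list, so guard before iterating.
+ #
+ #   funcs = bv.get_functions_containing(addr)
+ #   for f in funcs or []:
+ #       print("{:#x} is inside {}".format(addr, f.name))
+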
+[docs] def get_function_at(self, addr, plat=None):
+ """
+ ``get_function_at`` gets a Function object for the function that starts at virtual address ``addr``:
+
+ :param int addr: starting virtual address of the desired function
+ :param Platform plat: plat of the desired function
+ :return: returns a Function object or None for the function at the virtual address provided
+ :rtype: Function
+ :Example:
+
+ >>> bv.get_function_at(bv.entry_point)
+ <func: x86_64@0x100001174>
+ >>>
+ """
+ if plat is None:
+ plat = self.platform
+ if plat is None:
+ return None
+ func = core.BNGetAnalysisFunction(self.handle, plat.handle, addr)
+ if func is None:
+ return None
+ return binaryninja.function.Function(self, func)
+
+[docs] def get_functions_at(self, addr):
+ """
+ ``get_functions_at`` gets a list of binaryninja.Function objects (one for each valid plat) at the given
+ virtual address. Binary Ninja does not limit the number of platforms in a given file, thus there may be multiple
+ functions defined from different architectures at the same location. This API allows you to query all of the
+ valid platforms.
+
+ :param int addr: virtual address of the desired Function object list.
+ :return: a list of binaryninja.Function objects defined at the provided virtual address
+ :rtype: list(Function)
+ """
+ count = ctypes.c_ulonglong(0)
+ funcs = core.BNGetAnalysisFunctionsForAddress(self.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(binaryninja.function.Function(self, core.BNNewFunctionReference(funcs[i])))
+ core.BNFreeFunctionList(funcs, count.value)
+ return result
+
+[docs] def get_recent_function_at(self, addr):
+ func = core.BNGetRecentAnalysisFunctionForAddress(self.handle, addr)
+ if func is None:
+ return None
+ return binaryninja.function.Function(self, func)
+
+[docs] def get_basic_blocks_at(self, addr):
+ """
+ ``get_basic_blocks_at`` gets a list of :py:Class:`BasicBlock` objects which exist at the provided virtual address.
+
+ :param int addr: virtual address of BasicBlock desired
+ :return: a list of :py:Class:`BasicBlock` objects
+ :rtype: list(BasicBlock)
+ """
+ count = ctypes.c_ulonglong(0)
+ blocks = core.BNGetBasicBlocksForAddress(self.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(basicblock.BasicBlock(core.BNNewBasicBlockReference(blocks[i]), self))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+[docs] def get_basic_blocks_starting_at(self, addr):
+ """
+ ``get_basic_blocks_starting_at`` gets a list of :py:Class:`BasicBlock` objects which start at the provided virtual address.
+
+ :param int addr: virtual address of BasicBlock desired
+ :return: a list of :py:Class:`BasicBlock` objects
+ :rtype: list(BasicBlock)
+ """
+ count = ctypes.c_ulonglong(0)
+ blocks = core.BNGetBasicBlocksStartingAtAddress(self.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(basicblock.BasicBlock(core.BNNewBasicBlockReference(blocks[i]), self))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+[docs] def get_recent_basic_block_at(self, addr):
+ block = core.BNGetRecentBasicBlockForAddress(self.handle, addr)
+ if block is None:
+ return None
+ return basicblock.BasicBlock(block, self)
+
+[docs] def get_code_refs(self, addr, length=None):
+ """
+ ``get_code_refs`` returns a list of ReferenceSource objects (xrefs or cross-references) that point to the provided virtual address.
+
+ :param int addr: virtual address to query for references
+ :return: List of References for the given virtual address
+ :rtype: list(ReferenceSource)
+ :Example:
+
+ >>> bv.get_code_refs(here)
+ [<ref: x86@0x4165ff>]
+ >>>
+
+ """
+ count = ctypes.c_ulonglong(0)
+ if length is None:
+ refs = core.BNGetCodeReferences(self.handle, addr, count)
+ else:
+ refs = core.BNGetCodeReferencesInRange(self.handle, addr, length, count)
+ result = []
+ for i in range(0, count.value):
+ if refs[i].func:
+ func = binaryninja.function.Function(self, core.BNNewFunctionReference(refs[i].func))
+ else:
+ func = None
+ if refs[i].arch:
+ arch = binaryninja.architecture.CoreArchitecture._from_cache(refs[i].arch)
+ else:
+ arch = None
+ addr = refs[i].addr
+ result.append(binaryninja.architecture.ReferenceSource(func, arch, addr))
+ core.BNFreeCodeReferences(refs, count.value)
+ return result
+
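+ # A minimal usage sketch of ``get_code_refs``, assuming ``addr`` is a queried
+ # address; the ``function`` and ``address`` attribute names on ReferenceSource
+ # are assumed from the constructor call above.
+ #
+ #   for ref in bv.get_code_refs(addr):
+ #       print("referenced from {:#x} in {}".format(ref.address, ref.function))
+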
+[docs] def get_data_refs(self, addr, length=None):
+ """
+ ``get_data_refs`` returns a list of virtual addresses of data which reference ``addr``. Optionally, a length
+ may be specified; when ``length`` is set, ``get_data_refs`` returns the references into the range ``addr``-``addr``+``length``.
+
+ :param int addr: virtual address to query for references
+ :param int length: optional length of query
+ :return: list of integers
+ :rtype: list(integer)
+ :Example:
+
+ >>> bv.get_data_refs(here)
+ [4203812]
+ >>>
+ """
+ count = ctypes.c_ulonglong(0)
+ if length is None:
+ refs = core.BNGetDataReferences(self.handle, addr, count)
+ else:
+ refs = core.BNGetDataReferencesInRange(self.handle, addr, length, count)
+
+ result = []
+ for i in range(0, count.value):
+ result.append(refs[i])
+ core.BNFreeDataReferences(refs, count.value)
+ return result
+
+[docs] def get_data_refs_from(self, addr, length=None):
+ """
+ ``get_data_refs_from`` returns a list of virtual addresses referenced by the address ``addr``. Optionally, a length
+ may be specified; when ``length`` is set, ``get_data_refs_from`` returns the data referenced in the range ``addr``-``addr``+``length``.
+
+ :param int addr: virtual address to query for references
+ :param int length: optional length of query
+ :return: list of integers
+ :rtype: list(integer)
+ :Example:
+
+ >>> bv.get_data_refs_from(here)
+ [4200327]
+ >>>
+ """
+ count = ctypes.c_ulonglong(0)
+ if length is None:
+ refs = core.BNGetDataReferencesFrom(self.handle, addr, count)
+ else:
+ refs = core.BNGetDataReferencesFromInRange(self.handle, addr, length, count)
+
+ result = []
+ for i in range(0, count.value):
+ result.append(refs[i])
+ core.BNFreeDataReferences(refs, count.value)
+ return result
+
+
+[docs] def get_symbol_at(self, addr, namespace=None):
+ """
+ ``get_symbol_at`` returns the Symbol at the provided virtual address.
+
+ :param int addr: virtual address to query for symbol
+ :return: Symbol for the given virtual address
+ :param NameSpace namespace: the namespace of the symbols to retrieve
+ :rtype: Symbol
+ :Example:
+
+ >>> bv.get_symbol_at(bv.entry_point)
+ <FunctionSymbol: "_start" @ 0x100001174>
+ >>>
+ """
+ if isinstance(namespace, str):
+ namespace = types.NameSpace(namespace)
+ if isinstance(namespace, types.NameSpace):
+ namespace = namespace._get_core_struct()
+
+ sym = core.BNGetSymbolByAddress(self.handle, addr, namespace)
+ if sym is None:
+ return None
+ return types.Symbol(None, None, None, handle = sym)
+
+[docs] def get_symbol_by_raw_name(self, name, namespace=None):
+ """
+ ``get_symbol_by_raw_name`` retrieves a Symbol object for a given raw (mangled) name.
+
+ :param str name: raw (mangled) name of Symbol to be retrieved
+ :return: Symbol object corresponding to the provided raw name
+ :param NameSpace namespace: the namespace to search for the given symbol
+ :rtype: Symbol
+ :Example:
+
+ >>> bv.get_symbol_by_raw_name('?testf@Foobar@@SA?AW4foo@1@W421@@Z')
+ <FunctionSymbol: "public: static enum Foobar::foo __cdecl Foobar::testf(enum Foobar::foo)" @ 0x10001100>
+ >>>
+ """
+ if isinstance(namespace, str):
+ namespace = types.NameSpace(namespace)
+ if isinstance(namespace, types.NameSpace):
+ namespace = namespace._get_core_struct()
+ sym = core.BNGetSymbolByRawName(self.handle, name, namespace)
+ if sym is None:
+ return None
+ return types.Symbol(None, None, None, handle = sym)
+
+[docs] def get_symbols_by_name(self, name, namespace=None):
+ """
+ ``get_symbols_by_name`` retrieves a list of Symbol objects for the given symbol name.
+
+ :param str name: name of Symbol object to be retrieved
+ :return: list of Symbol objects corresponding to the provided name
+ :param NameSpace namespace: the namespace of the symbol
+ :rtype: list(Symbol)
+ :Example:
+
+ >>> bv.get_symbols_by_name('?testf@Foobar@@SA?AW4foo@1@W421@@Z')
+ [<FunctionSymbol: "public: static enum Foobar::foo __cdecl Foobar::testf(enum Foobar::foo)" @ 0x10001100>]
+ >>>
+ """
+ if isinstance(namespace, str):
+ namespace = types.NameSpace(namespace)
+ if isinstance(namespace, types.NameSpace):
+ namespace = namespace._get_core_struct()
+ count = ctypes.c_ulonglong(0)
+ syms = core.BNGetSymbolsByName(self.handle, name, count, namespace)
+ result = []
+ for i in range(0, count.value):
+ result.append(types.Symbol(None, None, None, handle = core.BNNewSymbolReference(syms[i])))
+ core.BNFreeSymbolList(syms, count.value)
+ return result
+
+[docs] def get_symbols(self, start=None, length=None, namespace=None):
+ """
+ ``get_symbols`` retrieves the list of all Symbol objects in the optionally provided range.
+
+ :param int start: optional start virtual address
+ :param int length: optional length
+ :return: list of all Symbol objects, or those Symbol objects in the range of ``start``-``start+length``
+ :rtype: list(Symbol)
+ :Example:
+
+ >>> bv.get_symbols(0x1000200c, 1)
+ [<ImportAddressSymbol: "KERNEL32!IsProcessorFeaturePresent@IAT" @ 0x1000200c>]
+ >>>
+ """
+ count = ctypes.c_ulonglong(0)
+ if isinstance(namespace, str):
+ namespace = types.NameSpace(namespace)
+ if isinstance(namespace, types.NameSpace):
+ namespace = namespace._get_core_struct()
+ if start is None:
+ syms = core.BNGetSymbols(self.handle, count, namespace)
+ else:
+ syms = core.BNGetSymbolsInRange(self.handle, start, length, count, namespace)
+ result = []
+ for i in range(0, count.value):
+ result.append(types.Symbol(None, None, None, handle = core.BNNewSymbolReference(syms[i])))
+ core.BNFreeSymbolList(syms, count.value)
+ return result
+
+[docs] def get_symbols_of_type(self, sym_type, start=None, length=None, namespace=None):
+ """
+ ``get_symbols_of_type`` retrieves a list of all Symbol objects of the provided symbol type in the optionally
+ provided range.
+
+ :param SymbolType sym_type: A Symbol type: :py:Class:`Symbol`.
+ :param int start: optional start virtual address
+ :param int length: optional length
+ :return: list of all Symbol objects of type sym_type, or those Symbol objects in the range of ``start``-``start+length``
+ :rtype: list(Symbol)
+ :Example:
+
+ >>> bv.get_symbols_of_type(SymbolType.ImportAddressSymbol, 0x10002028, 1)
+ [<ImportAddressSymbol: "KERNEL32!GetCurrentThreadId@IAT" @ 0x10002028>]
+ >>>
+ """
+ if isinstance(sym_type, str):
+ sym_type = SymbolType[sym_type]
+ if isinstance(namespace, str):
+ namespace = types.NameSpace(namespace)
+ if isinstance(namespace, types.NameSpace):
+ namespace = namespace._get_core_struct()
+ count = ctypes.c_ulonglong(0)
+ if start is None:
+ syms = core.BNGetSymbolsOfType(self.handle, sym_type, count, namespace)
+ else:
+ syms = core.BNGetSymbolsOfTypeInRange(self.handle, sym_type, start, length, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(types.Symbol(None, None, None, handle = core.BNNewSymbolReference(syms[i])))
+ core.BNFreeSymbolList(syms, count.value)
+ return result
+
+[docs] def define_auto_symbol(self, sym):
+ """
+ ``define_auto_symbol`` adds a symbol to the internal list of automatically discovered Symbol objects in a given
+ namespace.
+
+ .. warning:: If multiple symbols for the same address are defined, only the most recent symbol will ever be used.
+
+ :param Symbol sym: the symbol to define
+ :rtype: None
+ """
+ core.BNDefineAutoSymbol(self.handle, sym.handle)
+
+[docs] def define_auto_symbol_and_var_or_function(self, sym, sym_type, plat=None):
+ """
+ ``define_auto_symbol_and_var_or_function`` defines an automatically discovered symbol and the corresponding variable or function at its address.
+
+ .. warning:: If multiple symbols for the same address are defined, only the most recent symbol will ever be used.
+
+ :param Symbol sym: the symbol to define
+ :param SymbolType sym_type: Type of symbol being defined
+ :param Platform plat: (optional) platform
+ :rtype: None
+ """
+ if plat is None:
+ plat = self.platform
+ if plat is not None:
+ plat = plat.handle
+ if sym_type is not None:
+ sym_type = sym_type.handle
+ core.BNDefineAutoSymbolAndVariableOrFunction(self.handle, plat, sym.handle, sym_type)
+
+[docs] def undefine_auto_symbol(self, sym):
+ """
+ ``undefine_auto_symbol`` removes a symbol from the internal list of automatically discovered Symbol objects.
+
+ :param Symbol sym: the symbol to undefine
+ :rtype: None
+ """
+ core.BNUndefineAutoSymbol(self.handle, sym.handle)
+
+[docs] def define_user_symbol(self, sym):
+ """
+ ``define_user_symbol`` adds a symbol to the internal list of user added Symbol objects.
+
+ .. warning:: If multiple symbols for the same address are defined, only the most recent symbol will ever be used.
+
+ :param Symbol sym: the symbol to define
+ :rtype: None
+ """
+ core.BNDefineUserSymbol(self.handle, sym.handle)
+
+[docs] def undefine_user_symbol(self, sym):
+ """
+ ``undefine_user_symbol`` removes a symbol from the internal list of user added Symbol objects.
+
+ :param Symbol sym: the symbol to undefine
+ :rtype: None
+ """
+ core.BNUndefineUserSymbol(self.handle, sym.handle)
+
+[docs] def define_imported_function(self, import_addr_sym, func):
+ """
+ ``define_imported_function`` defines an imported Function ``func`` with a ImportedFunctionSymbol type.
+
+ :param Symbol import_addr_sym: A Symbol object with type ImportedFunctionSymbol
+ :param Function func: A Function object to define as an imported function
+ :rtype: None
+ """
+ core.BNDefineImportedFunction(self.handle, import_addr_sym.handle, func.handle)
+
+[docs] def is_never_branch_patch_available(self, addr, arch=None):
+ """
+ ``is_never_branch_patch_available`` queries the architecture plugin to determine if the
+ instruction at ``addr`` can be made to **never branch**. The actual logic of which is implemented in the
+ ``perform_is_never_branch_patch_available`` in the corresponding architecture.
+
+ :param int addr: the virtual address of the instruction to be patched
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012ed)
+ 'test eax, eax'
+ >>> bv.is_never_branch_patch_available(0x100012ed)
+ False
+ >>> bv.get_disassembly(0x100012ef)
+ 'jg 0x100012f5'
+ >>> bv.is_never_branch_patch_available(0x100012ef)
+ True
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNIsNeverBranchPatchAvailable(self.handle, arch.handle, addr)
+
+[docs] def is_always_branch_patch_available(self, addr, arch=None):
+ """
+ ``is_always_branch_patch_available`` queries the architecture plugin to determine if the
+ instruction at ``addr`` can be made to **always branch**. The actual logic of which is implemented in the
+ ``perform_is_always_branch_patch_available`` in the corresponding architecture.
+
+ :param int addr: the virtual address of the instruction to be patched
+ :param Architecture arch: (optional) the architecture for the current view
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012ed)
+ 'test eax, eax'
+ >>> bv.is_always_branch_patch_available(0x100012ed)
+ False
+ >>> bv.get_disassembly(0x100012ef)
+ 'jg 0x100012f5'
+ >>> bv.is_always_branch_patch_available(0x100012ef)
+ True
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNIsAlwaysBranchPatchAvailable(self.handle, arch.handle, addr)
+
+[docs] def is_invert_branch_patch_available(self, addr, arch=None):
+ """
+ ``is_invert_branch_patch_available`` queries the architecture plugin to determine if the instruction at ``addr``
+ is a branch that can be inverted. The actual logic of which is implemented in the
+ ``perform_is_invert_branch_patch_available`` in the corresponding architecture.
+
+ :param int addr: the virtual address of the instruction to be patched
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012ed)
+ 'test eax, eax'
+ >>> bv.is_invert_branch_patch_available(0x100012ed)
+ False
+ >>> bv.get_disassembly(0x100012ef)
+ 'jg 0x100012f5'
+ >>> bv.is_invert_branch_patch_available(0x100012ef)
+ True
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNIsInvertBranchPatchAvailable(self.handle, arch.handle, addr)
+
+[docs] def is_skip_and_return_zero_patch_available(self, addr, arch=None):
+ """
+ ``is_skip_and_return_zero_patch_available`` queries the architecture plugin to determine if the
+ instruction at ``addr`` is similar to an x86 "call" instruction which can be made to return zero. The actual
+ logic of which is implemented in the ``perform_is_skip_and_return_zero_patch_available`` in the corresponding
+ architecture.
+
+ :param int addr: the virtual address of the instruction to be patched
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f6)
+ 'mov dword [0x10003020], eax'
+ >>> bv.is_skip_and_return_zero_patch_available(0x100012f6)
+ False
+ >>> bv.get_disassembly(0x100012fb)
+ 'call 0x10001629'
+ >>> bv.is_skip_and_return_zero_patch_available(0x100012fb)
+ True
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNIsSkipAndReturnZeroPatchAvailable(self.handle, arch.handle, addr)
+
+[docs] def is_skip_and_return_value_patch_available(self, addr, arch=None):
+ """
+ ``is_skip_and_return_value_patch_available`` queries the architecture plugin to determine if the
+ instruction at ``addr`` is similar to an x86 "call" instruction which can be made to return a value. The actual
+ logic of which is implemented in the ``perform_is_skip_and_return_value_patch_available`` in the corresponding
+ architecture.
+
+ :param int addr: the virtual address of the instruction to be patched
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True if the instruction can be patched, False otherwise
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f6)
+ 'mov dword [0x10003020], eax'
+ >>> bv.is_skip_and_return_value_patch_available(0x100012f6)
+ False
+ >>> bv.get_disassembly(0x100012fb)
+ 'call 0x10001629'
+ >>> bv.is_skip_and_return_value_patch_available(0x100012fb)
+ True
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNIsSkipAndReturnValuePatchAvailable(self.handle, arch.handle, addr)
+
+[docs] def convert_to_nop(self, addr, arch=None):
+ """
+ ``convert_to_nop`` converts the instruction at virtual address ``addr`` to a nop of the provided architecture.
+
+ .. note:: This API performs a binary patch, analysis may need to be updated afterward. Additionally the binary\
+ file must be saved in order to preserve the changes made.
+
+ :param int addr: virtual address of the instruction to convert to nops
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012fb)
+ 'call 0x10001629'
+ >>> bv.convert_to_nop(0x100012fb)
+ True
+ >>> #The above 'call' instruction is 5 bytes, a nop in x86 is 1 byte,
+ >>> # thus 5 nops are used:
+ >>> bv.get_disassembly(0x100012fb)
+ 'nop'
+ >>> bv.get_next_disassembly()
+ 'nop'
+ >>> bv.get_next_disassembly()
+ 'nop'
+ >>> bv.get_next_disassembly()
+ 'nop'
+ >>> bv.get_next_disassembly()
+ 'nop'
+ >>> bv.get_next_disassembly()
+ 'mov byte [ebp-0x1c], al'
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNConvertToNop(self.handle, arch.handle, addr)
+
+[docs] def always_branch(self, addr, arch=None):
+ """
+ ``always_branch`` convert the instruction of architecture ``arch`` at the virtual address ``addr`` to an
+ unconditional branch.
+
+ .. note:: This API performs a binary patch, analysis may need to be updated afterward. Additionally the binary\
+ file must be saved in order to preserve the changes made.
+
+ :param int addr: virtual address of the instruction to be modified
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x100012ef)
+ 'jg 0x100012f5'
+ >>> bv.always_branch(0x100012ef)
+ True
+ >>> bv.get_disassembly(0x100012ef)
+ 'jmp 0x100012f5'
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNAlwaysBranch(self.handle, arch.handle, addr)
+
+[docs] def never_branch(self, addr, arch=None):
+ """
+ ``never_branch`` convert the branch instruction of architecture ``arch`` at the virtual address ``addr`` to
+ a fall through.
+
+ .. note:: This API performs a binary patch, analysis may need to be updated afterward. Additionally the binary\
+ file must be saved in order to preserve the changes made.
+
+ :param int addr: virtual address of the instruction to be modified
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x1000130e)
+ 'jne 0x10001317'
+ >>> bv.never_branch(0x1000130e)
+ True
+ >>> bv.get_disassembly(0x1000130e)
+ 'nop'
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNConvertToNop(self.handle, arch.handle, addr)
+
+[docs] def invert_branch(self, addr, arch=None):
+ """
+ ``invert_branch`` converts the branch instruction of architecture ``arch`` at the virtual address ``addr`` to the
+ inverse branch.
+
+ .. note:: This API performs a binary patch, analysis may need to be updated afterward. Additionally the binary
+ file must be saved in order to preserve the changes made.
+
+ :param int addr: virtual address of the instruction to be modified
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x1000130e)
+ 'je 0x10001317'
+ >>> bv.invert_branch(0x1000130e)
+ True
+ >>>
+ >>> bv.get_disassembly(0x1000130e)
+ 'jne 0x10001317'
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNInvertBranch(self.handle, arch.handle, addr)
+
+[docs] def skip_and_return_value(self, addr, value, arch=None):
+ """
+ ``skip_and_return_value`` converts the ``call`` instruction of architecture ``arch`` at the virtual address
+ ``addr`` to the equivalent of returning a value.
+
+ :param int addr: virtual address of the instruction to be modified
+ :param int value: value to make the instruction *return*
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bv.get_disassembly(0x1000132a)
+ 'call 0x1000134a'
+ >>> bv.skip_and_return_value(0x1000132a, 42)
+ True
+ >>> #The return value from x86 functions is stored in eax thus:
+ >>> bv.get_disassembly(0x1000132a)
+ 'mov eax, 0x2a'
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNSkipAndReturnValue(self.handle, arch.handle, addr, value)
+
+[docs] def get_instruction_length(self, addr, arch=None):
+ """
+ ``get_instruction_length`` returns the number of bytes in the instruction of Architecture ``arch`` at the virtual
+ address ``addr``
+
+ :param int addr: virtual address of the instruction to query
+ :param Architecture arch: (optional) the architecture of the instructions if different from the default
+ :return: Number of bytes in instruction
+ :rtype: int
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.get_instruction_length(0x100012f1)
+ 2L
+ >>>
+ """
+ if arch is None:
+ arch = self.arch
+ return core.BNGetInstructionLength(self.handle, arch.handle, addr)
+
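+ # The three ``notify_data_*`` wrappers below forward data-modification notifications
+ # (a write, insertion, or removal of ``length`` bytes starting at ``offset``) to the core
+ # via the corresponding ``BNNotifyData*`` calls; they appear to be intended for view
+ # implementations reporting changes to their backing data.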
+[docs] def notify_data_written(self, offset, length):
+ core.BNNotifyDataWritten(self.handle, offset, length)
+
+[docs] def notify_data_inserted(self, offset, length):
+ core.BNNotifyDataInserted(self.handle, offset, length)
+
+[docs] def notify_data_removed(self, offset, length):
+ core.BNNotifyDataRemoved(self.handle, offset, length)
+
+[docs] def get_strings(self, start = None, length = None):
+ """
+ ``get_strings`` returns a list of strings defined in the binary in the optional virtual address range:
+ ``start-(start+length)``
+
+ :param int start: optional virtual address to start the string list from, defaults to start of the binary
+ :param int length: optional length range to return strings from, defaults to length of the binary
+ :return: a list of all strings or a list of strings defined between ``start`` and ``start+length``
+ :rtype: list(StringReference)
+ :Example:
+
+ >>> bv.get_strings(0x1000004d, 1)
+ [<AsciiString: 0x1000004d, len 0x2c>]
+ >>>
+ """
+ count = ctypes.c_ulonglong(0)
+ if start is None:
+ strings = core.BNGetStrings(self.handle, count)
+ else:
+ if length is None:
+ length = self.end - start
+ strings = core.BNGetStringsInRange(self.handle, start, length, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(StringReference(self, StringType(strings[i].type), strings[i].start, strings[i].length))
+ core.BNFreeStringReferenceList(strings)
+ return result
+
+[docs] def add_analysis_completion_event(self, callback):
+ """
+ ``add_analysis_completion_event`` sets up a call back function to be called when analysis has been completed.
+ This is helpful when using ``update_analysis`` which does not wait for analysis completion before returning.
+
+ The caller of this function is not responsible for maintaining the lifetime of the returned AnalysisCompletionEvent object.
+
+ :param callable() callback: A function to be called with no parameters when analysis has completed.
+ :return: An initialized AnalysisCompletionEvent object.
+ :rtype: AnalysisCompletionEvent
+ :Example:
+
+ >>> def completionEvent():
+ ... print("done")
+ ...
+ >>> bv.add_analysis_completion_event(completionEvent)
+ <binaryninja.AnalysisCompletionEvent object at 0x10a2c9f10>
+ >>> bv.update_analysis()
+ done
+ >>>
+ """
+ return AnalysisCompletionEvent(self, callback)
+
+[docs] def get_next_function_start_after(self, addr):
+ """
+ ``get_next_function_start_after`` returns the virtual address of the Function that occurs after the virtual address
+ ``addr``
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the next Function
+ :rtype: int
+ :Example:
+
+ >>> bv.get_next_function_start_after(bv.entry_point)
+ 268441061L
+ >>> hex(bv.get_next_function_start_after(bv.entry_point))
+ '0x100015e5L'
+ >>> hex(bv.get_next_function_start_after(0x100015e5))
+ '0x10001629L'
+ >>> hex(bv.get_next_function_start_after(0x10001629))
+ '0x1000165eL'
+ >>>
+ """
+ return core.BNGetNextFunctionStartAfterAddress(self.handle, addr)
+
+[docs] def get_next_basic_block_start_after(self, addr):
+ """
+ ``get_next_basic_block_start_after`` returns the virtual address of the BasicBlock that occurs after the virtual
+ address ``addr``
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the next BasicBlock
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.get_next_basic_block_start_after(bv.entry_point))
+ '0x100014a8L'
+ >>> hex(bv.get_next_basic_block_start_after(0x100014a8))
+ '0x100014adL'
+ >>>
+ """
+ return core.BNGetNextBasicBlockStartAfterAddress(self.handle, addr)
+
+[docs] def get_next_data_after(self, addr):
+ """
+ ``get_next_data_after`` retrieves the virtual address of the next non-code (data) byte.
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the next byte that is data, not code
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.get_next_data_after(0x10000000))
+ '0x10000001L'
+ """
+ return core.BNGetNextDataAfterAddress(self.handle, addr)
+
+[docs] def get_next_data_var_after(self, addr):
+ """
+ ``get_next_data_var_after`` retrieves the virtual address of the next :py:Class:`DataVariable`
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the next :py:Class:`DataVariable`
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.get_next_data_var_after(0x10000000))
+ '0x1000003cL'
+ >>> bv.get_data_var_at(0x1000003c)
+ <var 0x1000003c: int32_t>
+ >>>
+ """
+ return core.BNGetNextDataVariableAfterAddress(self.handle, addr)
+
+[docs] def get_previous_function_start_before(self, addr):
+ """
+ ``get_previous_function_start_before`` returns the virtual address of the Function that occurs prior to the
+ virtual address provided
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the previous Function
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.entry_point)
+ '0x1000149fL'
+ >>> hex(bv.get_next_function_start_after(bv.entry_point))
+ '0x100015e5L'
+ >>> hex(bv.get_previous_function_start_before(0x100015e5))
+ '0x1000149fL'
+ >>>
+ """
+ return core.BNGetPreviousFunctionStartBeforeAddress(self.handle, addr)
+
+[docs] def get_previous_basic_block_start_before(self, addr):
+ """
+ ``get_previous_basic_block_start_before`` returns the virtual address of the BasicBlock that occurs prior to the
+ provided virtual address
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the previous BasicBlock
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.entry_point)
+ '0x1000149fL'
+ >>> hex(bv.get_next_basic_block_start_after(bv.entry_point))
+ '0x100014a8L'
+ >>> hex(bv.get_previous_basic_block_start_before(0x100014a8))
+ '0x1000149fL'
+ >>>
+ """
+ return core.BNGetPreviousBasicBlockStartBeforeAddress(self.handle, addr)
+
+[docs] def get_previous_basic_block_end_before(self, addr):
+ """
+ ``get_previous_basic_block_end_before`` returns the virtual address of the end of the BasicBlock that occurs prior to the provided virtual address
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the previous BasicBlock end
+ :rtype: int
+ :Example:
+ >>> hex(bv.entry_point)
+ '0x1000149fL'
+ >>> hex(bv.get_next_basic_block_start_after(bv.entry_point))
+ '0x100014a8L'
+ >>> hex(bv.get_previous_basic_block_end_before(0x100014a8))
+ '0x100014a8L'
+ """
+ return core.BNGetPreviousBasicBlockEndBeforeAddress(self.handle, addr)
+
+[docs] def get_previous_data_before(self, addr):
+ """
+ ``get_previous_data_before`` returns the virtual address of the previous data (non-code) byte before the provided address
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the previous data (non-code) byte
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.get_previous_data_before(0x1000001))
+ '0x1000000L'
+ >>>
+ """
+ return core.BNGetPreviousDataBeforeAddress(self.handle, addr)
+
+[docs] def get_previous_data_var_before(self, addr):
+ """
+ ``get_previous_data_var_before`` returns the virtual address of the previous :py:Class:`DataVariable` before the provided address
+
+ :param int addr: the virtual address to start looking from.
+ :return: the virtual address of the previous :py:Class:`DataVariable`
+ :rtype: int
+ :Example:
+
+ >>> hex(bv.get_previous_data_var_before(0x1000003c))
+ '0x10000000L'
+ >>> bv.get_data_var_at(0x10000000)
+ <var 0x10000000: int16_t>
+ >>>
+ """
+ return core.BNGetPreviousDataVariableBeforeAddress(self.handle, addr)
+
+[docs] def get_linear_disassembly_position_at(self, addr, settings):
+ """
+ ``get_linear_disassembly_position_at`` instantiates a :py:class:`LinearDisassemblyPosition` object for use in
+ :py:meth:`get_previous_linear_disassembly_lines` or :py:meth:`get_next_linear_disassembly_lines`.
+
+ :param int addr: virtual address of linear disassembly position
+ :param DisassemblySettings settings: an instantiated :py:class:`DisassemblySettings` object
+ :return: An instantiated :py:class:`LinearDisassemblyPosition` object for the provided virtual address
+ :rtype: LinearDisassemblyPosition
+ :Example:
+
+ >>> settings = DisassemblySettings()
+ >>> pos = bv.get_linear_disassembly_position_at(0x1000149f, settings)
+ >>> lines = bv.get_previous_linear_disassembly_lines(pos, settings)
+ >>> lines
+ [<0x1000149a: pop esi>, <0x1000149b: pop ebp>,
+ <0x1000149c: retn 0xc>, <0x1000149f: >]
+ """
+ if settings is not None:
+ settings = settings.handle
+ pos = core.BNGetLinearDisassemblyPositionForAddress(self.handle, addr, settings)
+ func = None
+ block = None
+ if pos.function:
+ func = binaryninja.function.Function(self, pos.function)
+ if pos.block:
+ block = basicblock.BasicBlock(pos.block, self)
+ return lineardisassembly.LinearDisassemblyPosition(func, block, pos.address)
+
+ def _get_linear_disassembly_lines(self, api, pos, settings):
+ pos_obj = core.BNLinearDisassemblyPosition()
+ pos_obj.function = None
+ pos_obj.block = None
+ pos_obj.address = pos.address
+ if pos.function is not None:
+ pos_obj.function = core.BNNewFunctionReference(pos.function.handle)
+ if pos.block is not None:
+ pos_obj.block = core.BNNewBasicBlockReference(pos.block.handle)
+
+ if settings is not None:
+ settings = settings.handle
+
+ count = ctypes.c_ulonglong(0)
+ lines = api(self.handle, pos_obj, settings, count)
+
+ result = []
+ for i in range(0, count.value):
+ func = None
+ block = None
+ if lines[i].function:
+ func = binaryninja.function.Function(self, core.BNNewFunctionReference(lines[i].function))
+ if lines[i].block:
+ block = basicblock.BasicBlock(core.BNNewBasicBlockReference(lines[i].block), self)
+ color = highlight.HighlightColor._from_core_struct(lines[i].contents.highlight)
+ addr = lines[i].contents.addr
+ tokens = binaryninja.function.InstructionTextToken.get_instruction_lines(lines[i].contents.tokens, lines[i].contents.count)
+ contents = binaryninja.function.DisassemblyTextLine(tokens, addr, color = color)
+ result.append(lineardisassembly.LinearDisassemblyLine(lines[i].type, func, block, lines[i].lineOffset, contents))
+
+ func = None
+ block = None
+ if pos_obj.function:
+ func = binaryninja.function.Function(self, pos_obj.function)
+ if pos_obj.block:
+ block = basicblock.BasicBlock(pos_obj.block, self)
+ pos.function = func
+ pos.block = block
+ pos.address = pos_obj.address
+
+ core.BNFreeLinearDisassemblyLines(lines, count.value)
+ return result
+
+[docs] def get_previous_linear_disassembly_lines(self, pos, settings):
+ """
+ ``get_previous_linear_disassembly_lines`` retrieves a list of :py:class:`LinearDisassemblyLine` objects for the
+ previous disassembly lines, and updates the LinearDisassemblyPosition passed in. This function can be called
+ repeatedly to get more lines of linear disassembly.
+
+ :param LinearDisassemblyPosition pos: Position to start retrieving linear disassembly lines from
+ :param DisassemblySettings settings: DisassemblySettings display settings for the linear disassembly
+ :return: a list of :py:class:`LinearDisassemblyLine` objects for the previous lines.
+ :Example:
+
+ >>> settings = DisassemblySettings()
+ >>> pos = bv.get_linear_disassembly_position_at(0x1000149a, settings)
+ >>> bv.get_previous_linear_disassembly_lines(pos, settings)
+ [<0x10001488: push dword [ebp+0x10 {arg_c}]>, ... , <0x1000149a: >]
+ >>> bv.get_previous_linear_disassembly_lines(pos, settings)
+ [<0x10001483: xor eax, eax {0x0}>, ... , <0x10001488: >]
+ """
+ return self._get_linear_disassembly_lines(core.BNGetPreviousLinearDisassemblyLines, pos, settings)
+
+[docs] def get_next_linear_disassembly_lines(self, pos, settings):
+ """
+ ``get_next_linear_disassembly_lines`` retrieves a list of :py:class:`LinearDisassemblyLine` objects for the
+ next disassembly lines, and updates the LinearDisassemblyPosition passed in. This function can be called
+ repeatedly to get more lines of linear disassembly.
+
+ :param LinearDisassemblyPosition pos: Position to start retrieving linear disassembly lines from
+ :param DisassemblySettings settings: DisassemblySettings display settings for the linear disassembly
+ :return: a list of :py:class:`LinearDisassemblyLine` objects for the next lines.
+ :Example:
+
+ >>> settings = DisassemblySettings()
+ >>> pos = bv.get_linear_disassembly_position_at(0x10001483, settings)
+ >>> bv.get_next_linear_disassembly_lines(pos, settings)
+ [<0x10001483: xor eax, eax {0x0}>, <0x10001485: inc eax {0x1}>, ... , <0x10001488: >]
+ >>> bv.get_next_linear_disassembly_lines(pos, settings)
+ [<0x10001488: push dword [ebp+0x10 {arg_c}]>, ... , <0x1000149a: >]
+ >>>
+ """
+ return self._get_linear_disassembly_lines(core.BNGetNextLinearDisassemblyLines, pos, settings)
+
+[docs] def get_linear_disassembly(self, settings):
+ """
+ ``get_linear_disassembly`` gets an iterator for all lines in the linear disassembly of the view for the given
+ disassembly settings.
+
+ .. note:: linear_disassembly doesn't just return disassembly; it returns each line of the linear view,\
+ and thus will contain both data and disassembly.
+
+ :param DisassemblySettings settings: instance specifying the desired output formatting.
+ :return: An iterator containing formatted disassembly lines.
+ :rtype: LinearDisassemblyIterator
+ :Example:
+
+ >>> settings = DisassemblySettings()
+ >>> lines = bv.get_linear_disassembly(settings)
+ >>> for line in lines:
+ ... print(line)
+ ... break
+ ...
+ cf fa ed fe 07 00 00 01 ........
+ """
+ class LinearDisassemblyIterator(object):
+ def __init__(self, view, settings):
+ self.view = view
+ self.settings = settings
+
+ def __iter__(self):
+ pos = self.view.get_linear_disassembly_position_at(self.view.start, self.settings)
+ while True:
+ lines = self.view.get_next_linear_disassembly_lines(pos, self.settings)
+ if len(lines) == 0:
+ break
+ for line in lines:
+ yield line
+
+ return iter(LinearDisassemblyIterator(self, settings))
+
+[docs] def parse_type_string(self, text):
+ """
+ ``parse_type_string`` converts a `C-style` string into a :py:Class:`Type`.
+
+ :param str text: `C-style` string of type to create
+ :return: A tuple of a :py:Class:`Type` and type name
+ :rtype: tuple(Type, QualifiedName)
+ :Example:
+
+ >>> bv.parse_type_string("int foo")
+ (<type: int32_t>, 'foo')
+ >>>
+ """
+ result = core.BNQualifiedNameAndType()
+ errors = ctypes.c_char_p()
+ if not core.BNParseTypeString(self.handle, text, result, errors):
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise SyntaxError(error_str)
+ type_obj = types.Type(core.BNNewTypeReference(result.type), platform = self.platform)
+ name = types.QualifiedName._from_core_struct(result.name)
+ core.BNFreeQualifiedNameAndType(result)
+ return type_obj, name
+
+[docs] def get_type_by_name(self, name):
+ """
+ ``get_type_by_name`` returns the defined type whose name corresponds with the provided ``name``
+
+ :param QualifiedName name: Type name to lookup
+ :return: A :py:Class:`Type` or None if the type does not exist
+ :rtype: Type or None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> bv.define_user_type(name, type)
+ >>> bv.get_type_by_name(name)
+ <type: int32_t>
+ >>>
+ """
+ name = types.QualifiedName(name)._get_core_struct()
+ obj = core.BNGetAnalysisTypeByName(self.handle, name)
+ if not obj:
+ return None
+ return types.Type(obj, platform = self.platform)
+
+[docs] def get_type_by_id(self, id):
+ """
+ ``get_type_by_id`` returns the defined type whose unique identifier corresponds with the provided ``id``
+
+ :param str id: Unique identifier to lookup
+ :return: A :py:Class:`Type` or None if the type does not exist
+ :rtype: Type or None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> type_id = Type.generate_auto_type_id("source", name)
+ >>> bv.define_type(type_id, name, type)
+ >>> bv.get_type_by_id(type_id)
+ <type: int32_t>
+ >>>
+ """
+ obj = core.BNGetAnalysisTypeById(self.handle, id)
+ if not obj:
+ return None
+ return types.Type(obj, platform = self.platform)
+
+[docs] def get_type_name_by_id(self, id):
+ """
+ ``get_type_name_by_id`` returns the defined type name whose unique identifier corresponds with the provided ``id``
+
+ :param str id: Unique identifier to lookup
+ :return: A QualifiedName or None if the type does not exist
+ :rtype: QualifiedName or None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> type_id = Type.generate_auto_type_id("source", name)
+ >>> bv.define_type(type_id, name, type)
+ 'foo'
+ >>> bv.get_type_name_by_id(type_id)
+ 'foo'
+ >>>
+ """
+ name = core.BNGetAnalysisTypeNameById(self.handle, id)
+ result = types.QualifiedName._from_core_struct(name)
+ core.BNFreeQualifiedName(name)
+ if len(result) == 0:
+ return None
+ return result
+
+[docs] def get_type_id(self, name):
+ """
+ ``get_type_id`` returns the unique identifier of the defined type whose name corresponds with the
+ provided ``name``
+
+ :param QualifiedName name: Type name to lookup
+ :return: The unique identifier of the type
+ :rtype: str
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> type_id = Type.generate_auto_type_id("source", name)
+ >>> registered_name = bv.define_type(type_id, name, type)
+ >>> bv.get_type_id(registered_name) == type_id
+ True
+ >>>
+ """
+ name = types.QualifiedName(name)._get_core_struct()
+ return core.BNGetAnalysisTypeId(self.handle, name)
+
+[docs] def is_type_auto_defined(self, name):
+ """
+ ``is_type_auto_defined`` queries the user type list for ``name``. If ``name`` is not in the *user* type list then it
+ is considered an *auto* type.
+
+ :param QualifiedName name: Name of type to query
+ :return: True if the type is not a *user* type. False if the type is a *user* type.
+ :Example:
+ >>> bv.is_type_auto_defined("foo")
+ True
+ >>> bv.define_user_type("foo", bv.parse_type_string("struct {int x,y;}")[0])
+ >>> bv.is_type_auto_defined("foo")
+ False
+ >>>
+ """
+ name = types.QualifiedName(name)._get_core_struct()
+ return core.BNIsAnalysisTypeAutoDefined(self.handle, name)
+
+[docs] def define_type(self, type_id, default_name, type_obj):
+ """
+ ``define_type`` registers a :py:Class:`Type` ``type_obj`` under the given ``default_name`` in the global list of types
+ for the current :py:Class:`BinaryView`. This method should only be used for automatically generated types.
+
+ :param str type_id: Unique identifier for the automatically generated type
+ :param QualifiedName default_name: Name of the type to be registered
+ :param Type type_obj: Type object to be registered
+ :return: Registered name of the type. May not be the same as the requested name if the user has renamed types.
+ :rtype: QualifiedName
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> registered_name = bv.define_type(Type.generate_auto_type_id("source", name), name, type)
+ >>> bv.get_type_by_name(registered_name)
+ <type: int32_t>
+ """
+ name = types.QualifiedName(default_name)._get_core_struct()
+ reg_name = core.BNDefineAnalysisType(self.handle, type_id, name, type_obj.handle)
+ result = types.QualifiedName._from_core_struct(reg_name)
+ core.BNFreeQualifiedName(reg_name)
+ return result
+
+[docs] def define_user_type(self, name, type_obj):
+ """
+ ``define_user_type`` registers a :py:Class:`Type` ``type_obj`` of the given ``name`` in the global list of user
+ types for the current :py:Class:`BinaryView`.
+
+ :param QualifiedName name: Name of the user type to be registered
+ :param Type type_obj: Type object to be registered
+ :rtype: None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> bv.define_user_type(name, type)
+ >>> bv.get_type_by_name(name)
+ <type: int32_t>
+ """
+ name = types.QualifiedName(name)._get_core_struct()
+ core.BNDefineUserAnalysisType(self.handle, name, type_obj.handle)
+
+[docs] def undefine_type(self, type_id):
+ """
+ ``undefine_type`` removes a :py:Class:`Type` from the global list of types for the current :py:Class:`BinaryView`
+
+ :param str type_id: Unique identifier of type to be undefined
+ :rtype: None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> type_id = Type.generate_auto_type_id("source", name)
+ >>> bv.define_type(type_id, name, type)
+ >>> bv.get_type_by_name(name)
+ <type: int32_t>
+ >>> bv.undefine_type(type_id)
+ >>> bv.get_type_by_name(name)
+ >>>
+ """
+ core.BNUndefineAnalysisType(self.handle, type_id)
+
+[docs] def undefine_user_type(self, name):
+ """
+ ``undefine_user_type`` removes a :py:Class:`Type` from the global list of user types for the current
+ :py:Class:`BinaryView`
+
+ :param QualifiedName name: Name of user type to be undefined
+ :rtype: None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> bv.define_user_type(name, type)
+ >>> bv.get_type_by_name(name)
+ <type: int32_t>
+ >>> bv.undefine_user_type(name)
+ >>> bv.get_type_by_name(name)
+ >>>
+ """
+ name = types.QualifiedName(name)._get_core_struct()
+ core.BNUndefineUserAnalysisType(self.handle, name)
+
+[docs] def rename_type(self, old_name, new_name):
+ """
+ ``rename_type`` renames a type in the global list of types for the current :py:Class:`BinaryView`
+
+ :param QualifiedName old_name: Existing name of type to be renamed
+ :param QualifiedName new_name: New name of type to be renamed
+ :rtype: None
+ :Example:
+
+ >>> type, name = bv.parse_type_string("int foo")
+ >>> bv.define_user_type(name, type)
+ >>> bv.get_type_by_name("foo")
+ <type: int32_t>
+ >>> bv.rename_type("foo", "bar")
+ >>> bv.get_type_by_name("bar")
+ <type: int32_t>
+ >>>
+ """
+ old_name = types.QualifiedName(old_name)._get_core_struct()
+ new_name = types.QualifiedName(new_name)._get_core_struct()
+ core.BNRenameAnalysisType(self.handle, old_name, new_name)
+
+[docs] def register_platform_types(self, platform):
+ """
+ ``register_platform_types`` ensures that the platform-specific types for a :py:Class:`Platform` are available
+ for the current :py:Class:`BinaryView`. This is automatically performed when adding a new function or setting
+ the default platform.
+
+ :param Platform platform: Platform containing types to be registered
+ :rtype: None
+ :Example:
+
+ >>> platform = Platform["linux-x86"]
+ >>> bv.register_platform_types(platform)
+ >>>
+ """
+ core.BNRegisterPlatformTypes(self.handle, platform.handle)
+
+[docs] def find_next_data(self, start, data, flags=FindFlag.FindCaseSensitive):
+ """
+ ``find_next_data`` searches for the bytes ``data`` starting at the virtual address ``start`` until the end of the BinaryView.
+
+ :param int start: virtual address to start searching from.
+ :param str data: data to search for
+ :param FindFlag flags: (optional) defaults to a case-sensitive data search (``FindFlag.FindCaseSensitive``)
+
+ ====================  ============================
+ FindFlag              Description
+ ====================  ============================
+ FindCaseSensitive     Case-sensitive search
+ FindCaseInsensitive   Case-insensitive search
+ ====================  ============================
+ """
+ buf = databuffer.DataBuffer(str(data))
+ result = ctypes.c_ulonglong()
+ if not core.BNFindNextData(self.handle, start, buf.handle, result, flags):
+ return None
+ return result.value
+
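+ # A minimal usage sketch for ``find_next_data``, assuming ``bv`` is an open BinaryView as in
+ # the docstring examples above; the byte pattern and the resulting address are illustrative.
+ #
+ # >>> addr = bv.find_next_data(bv.start, "\x8b\xff")
+ # >>> addr is None or hex(addr)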
+
+[docs] def find_next_text(self, start, text, settings=None, flags=FindFlag.FindCaseSensitive):
+ """
+ ``find_next_text`` searches for string ``text`` occurring in the linear view output starting at the virtual
+ address ``start`` until the end of the BinaryView.
+
+ :param int start: virtual address to start searching from.
+ :param str text: text to search for
+ :param FindFlag flags: (optional) defaults to a case-sensitive data search (``FindFlag.FindCaseSensitive``)
+
+ ====================  ============================
+ FindFlag              Description
+ ====================  ============================
+ FindCaseSensitive     Case-sensitive search
+ FindCaseInsensitive   Case-insensitive search
+ ====================  ============================
+ """
+ if not isinstance(text, str):
+ raise TypeError("text parameter is not str type")
+ if settings is None:
+ settings = function.DisassemblySettings()
+ if not isinstance(settings, function.DisassemblySettings):
+ raise TypeError("settings parameter is not DisassemblySettings type")
+
+ result = ctypes.c_ulonglong()
+ if not core.BNFindNextText(self.handle, start, text, result, settings.handle, flags):
+ return None
+ return result.value
+
+[docs] def find_next_constant(self, start, constant, settings=None):
+ """
+ ``find_next_constant`` searches for integer constant ``constant`` occurring in the linear view output starting at the virtual
+ address ``start`` until the end of the BinaryView.
+
+ :param int start: virtual address to start searching from.
+ :param int constant: constant to search for
+ """
+ if not isinstance(constant, numbers.Integral):
+ raise TypeError("constant parameter is not integral type")
+ if settings is None:
+ settings = function.DisassemblySettings()
+ if not isinstance(settings, function.DisassemblySettings):
+ raise TypeError("settings parameter is not DisassemblySettings type")
+
+ result = ctypes.c_ulonglong()
+ if not core.BNFindNextConstant(self.handle, start, constant, result, settings.handle):
+ return None
+ return result.value
+
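+ # Brief usage sketches for the two text-oriented searches above, assuming ``bv`` is an open
+ # BinaryView and default DisassemblySettings; the search string, constant, and results are
+ # illustrative only.
+ #
+ # >>> addr = bv.find_next_text(bv.start, "eax")
+ # >>> addr is None or hex(addr)
+ # >>> addr = bv.find_next_constant(bv.start, 0x1000)
+ # >>> addr is None or hex(addr)
+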
+[docs] def reanalyze(self):
+ """
+ ``reanalyze`` causes all functions to be reanalyzed. This function does not wait for the analysis to finish.
+
+ :rtype: None
+ """
+ core.BNReanalyzeAllFunctions(self.handle)
+
+[docs] def show_plain_text_report(self, title, contents):
+ core.BNShowPlainTextReport(self.handle, title, contents)
+
+[docs] def show_markdown_report(self, title, contents, plaintext = ""):
+ """
+ ``show_markdown_report`` displays the markdown contents in UI applications and plaintext in command-line
+ applications. Markdown reports support hyperlinking into the BinaryView. Hyperlinks can be specified as follows:
+ ``binaryninja://?expr=_start``, where ``expr=`` specifies an expression parsable by the `parse_expression` API.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str contents: markdown contents to display
+ :param str plaintext: Plain text version to display (used on the command-line)
+ :rtype: None
+ :Example:
+ >>> bv.show_markdown_report("title", "##Contents", "Plain text contents")
+ Plain text contents
+ """
+ core.BNShowMarkdownReport(self.handle, title, contents, plaintext)
+
+[docs] def show_html_report(self, title, contents, plaintext = ""):
+ """
+ ``show_html_report`` displays the HTML contents in UI applications and plaintext in command-line
+ applications. HTML reports support hyperlinking into the BinaryView. Hyperlinks can be specified as follows:
+ ``binaryninja://?expr=_start``, where ``expr=`` specifies an expression parsable by the `parse_expression` API.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str contents: HTML contents to display
+ :param str plaintext: Plain text version to display (used on the command-line)
+ :rtype: None
+ :Example:
+ >>> bv.show_html_report("title", "<h1>Contents</h1>", "Plain text contents")
+ Plain text contents
+ """
+ core.BNShowHTMLReport(self.handle, title, contents, plaintext)
+
+[docs] def show_graph_report(self, title, graph):
+ core.BNShowGraphReport(self.handle, title, graph.handle)
+
+[docs] def get_address_input(self, prompt, title, current_address = None):
+ if current_address is None:
+ current_address = self.file.offset
+ value = ctypes.c_ulonglong()
+ if not core.BNGetAddressInput(value, prompt, title, self.handle, current_address):
+ return None
+ return value.value
+
+[docs] def add_auto_segment(self, start, length, data_offset, data_length, flags):
+ core.BNAddAutoSegment(self.handle, start, length, data_offset, data_length, flags)
+
+[docs] def remove_auto_segment(self, start, length):
+ core.BNRemoveAutoSegment(self.handle, start, length)
+
+[docs] def add_user_segment(self, start, length, data_offset, data_length, flags):
+ core.BNAddUserSegment(self.handle, start, length, data_offset, data_length, flags)
+
+[docs] def remove_user_segment(self, start, length):
+ core.BNRemoveUserSegment(self.handle, start, length)
+
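+ # A minimal sketch of defining a segment, assuming ``bv`` is a writable BinaryView and that
+ # ``SegmentFlag`` has been imported from ``binaryninja.enums``; the addresses, lengths, and
+ # flags used here are illustrative only.
+ #
+ # >>> bv.add_user_segment(0x20000000, 0x1000, 0, 0,
+ # ...     SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable)
+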
+[docs] def get_segment_at(self, addr):
+ seg = core.BNGetSegmentAt(self.handle, addr)
+ if not seg:
+ return None
+ return Segment(core.BNNewSegmentReference(seg))
+
+[docs] def get_address_for_data_offset(self, offset):
+ address = ctypes.c_ulonglong()
+ if not core.BNGetAddressForDataOffset(self.handle, offset, address):
+ return None
+ return address.value
+
+[docs] def add_auto_section(self, name, start, length, semantics = SectionSemantics.DefaultSectionSemantics,
+ type = "", align = 1, entry_size = 1, linked_section = "", info_section = "", info_data = 0):
+ core.BNAddAutoSection(self.handle, name, start, length, semantics, type, align, entry_size, linked_section,
+ info_section, info_data)
+
+
+
+[docs] def add_user_section(self, name, start, length, semantics = SectionSemantics.DefaultSectionSemantics,
+ type = "", align = 1, entry_size = 1, linked_section = "", info_section = "", info_data = 0):
+ core.BNAddUserSection(self.handle, name, start, length, semantics, type, align, entry_size, linked_section,
+ info_section, info_data)
+
+
+
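+ # A minimal sketch of defining a section over an existing segment, assuming ``bv`` is an open
+ # BinaryView and ``SectionSemantics`` has been imported from ``binaryninja.enums``; the name,
+ # range, and semantics are illustrative only.
+ #
+ # >>> bv.add_user_section(".mydata", 0x20000000, 0x1000,
+ # ...     SectionSemantics.ReadWriteDataSectionSemantics)
+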
+[docs] def get_sections_at(self, addr):
+ count = ctypes.c_ulonglong(0)
+ section_list = core.BNGetSectionsAt(self.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(Section(core.BNNewSectionReference(section_list[i])))
+ core.BNFreeSectionList(section_list, count.value)
+ return result
+
+[docs] def get_section_by_name(self, name):
+ section = core.BNGetSectionByName(self.handle, name)
+ if not section:
+ return None
+ result = Section(core.BNNewSectionReference(section))
+ return result
+
+[docs] def get_unique_section_names(self, name_list):
+ incoming_names = (ctypes.c_char_p * len(name_list))()
+ for i in range(0, len(name_list)):
+ incoming_names[i] = binaryninja.cstr(name_list[i])
+ outgoing_names = core.BNGetUniqueSectionNames(self.handle, incoming_names, len(name_list))
+ result = []
+ for i in range(0, len(name_list)):
+ result.append(str(outgoing_names[i]))
+ core.BNFreeStringList(outgoing_names, len(name_list))
+ return result
+
+[docs] def query_metadata(self, key):
+ """
+ `query_metadata` retrieves metadata associated with the given key from the current BinaryView.
+
+ :param string key: key to query
+ :rtype: metadata associated with the key
+ :Example:
+
+ >>> bv.store_metadata("integer", 1337)
+ >>> bv.query_metadata("integer")
+ 1337L
+ >>> bv.store_metadata("list", [1,2,3])
+ >>> bv.query_metadata("list")
+ [1L, 2L, 3L]
+ >>> bv.store_metadata("string", "my_data")
+ >>> bv.query_metadata("string")
+ 'my_data'
+ """
+ md_handle = core.BNBinaryViewQueryMetadata(self.handle, key)
+ if md_handle is None:
+ raise KeyError(key)
+ return metadata.Metadata(handle=md_handle).value
+
+[docs] def store_metadata(self, key, md):
+ """
+ `store_metadata` stores an object for the given key in the current BinaryView. Objects stored using
+ `store_metadata` can be retrieved when the database is reopened. The objects stored are not arbitrary Python
+ objects! The values stored must be able to be held in a Metadata object. See :py:class:`Metadata`
+ for more information. Python objects could obviously be serialized using pickle, but this is intentionally
+ left to the user since there are potential security issues.
+
+ :param string key: key value to associate the Metadata object with
+ :param Varies md: object to store.
+ :rtype: None
+ :Example:
+
+ >>> bv.store_metadata("integer", 1337)
+ >>> bv.query_metadata("integer")
+ 1337L
+ >>> bv.store_metadata("list", [1,2,3])
+ >>> bv.query_metadata("list")
+ [1L, 2L, 3L]
+ >>> bv.store_metadata("string", "my_data")
+ >>> bv.query_metadata("string")
+ 'my_data'
+ """
+ if not isinstance(md, metadata.Metadata):
+ md = metadata.Metadata(md)
+ core.BNBinaryViewStoreMetadata(self.handle, key, md.handle)
+
+[docs] def remove_metadata(self, key):
+ """
+ `remove_metadata` removes the metadata associated with key from the current BinaryView.
+
+ :param string key: key associated with metadata to remove from the BinaryView
+ :rtype: None
+ :Example:
+
+ >>> bv.store_metadata("integer", 1337)
+ >>> bv.remove_metadata("integer")
+ """
+ core.BNBinaryViewRemoveMetadata(self.handle, key)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+[docs] def parse_expression(self, expression, here=0):
+ """
+ Evaluates a string expression to an integer value.
+
+ The parser uses the following rules:
+ - symbols are defined by the lexer as `[A-Za-z0-9_:<>][A-Za-z0-9_:$\-<>]+` or anything enclosed in either single or
+ double quotes
+ - Numbers default to hexadecimal, thus `_printf + 10` is equivalent to `_printf + 0x10`. If decimal numbers are required, use the decimal prefix.
+ - Since numbers and symbols can be ambiguous it's recommended that you prefix your numbers with the following:
+ - 0x - Hexadecimal
+ - 0n - Decimal
+ - 0 - Octal
+ - In the case of an ambiguous number/symbol (one with no prefix), for instance `12345`, we will first attempt
+ to look up the string as a symbol; if a symbol is found its address is used, otherwise we attempt to convert
+ it to a hexadecimal number.
+ - The following operations are valid: +, -, *, /, %, (), &, |, ^, ~
+ - In addition to the above operators there are _il-style_ dereference operators
+ - [<expression>] - read the _current address size_ at <expression>
+ - [<expression>].b - read the byte at <expression>
+ - [<expression>].w - read the word (2 bytes) at <expression>
+ - [<expression>].d - read the dword (4 bytes) at <expression>
+ - [<expression>].q - read the quadword (8 bytes) at <expression>
+ - The `$here` keyword can be used in calculations and is defined as the `here` parameter
+
+ :param string expression: Arithmetic expression to be evaluated
+ :param int here: (optional) Base address for relative expressions, defaults to zero
+ :rtype: int
+ """
+ offset = ctypes.c_ulonglong()
+ errors = ctypes.c_char_p()
+ if not core.BNParseExpression(self.handle, expression, offset, here, errors):
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise ValueError(error_str)
+ return offset.value
+
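+ # A short sketch of ``parse_expression``, assuming ``bv`` is an open BinaryView; per the rules
+ # above, unprefixed numbers are treated as hexadecimal and ``$here`` is bound to the ``here``
+ # argument, so both calls below should evaluate to 0x1004.
+ #
+ # >>> bv.parse_expression("0x1000 + 4")
+ # >>> bv.parse_expression("$here + 4", 0x1000)
+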
+[docs] def eval(self, expression, here=0):
+ """
+ Evaluates a string expression to an integer value. This is a more concise alias for the `parse_expression` API.
+ See `parse_expression` for details on usage.
+ """
+ return self.parse_expression(expression, here)
+
+
+[docs]class BinaryReader(object):
+ """
+ ``class BinaryReader`` is a convenience class for reading binary data.
+
+ BinaryReader can be instantiated as follows and the rest of the document will start from this context ::
+
+ >>> from binaryninja import *
+ >>> bv = BinaryViewType['Mach-O'].open("/bin/ls")
+ >>> br = BinaryReader(bv)
+ >>> hex(br.read32())
+ '0xfeedfacfL'
+ >>>
+
+ Or using the optional endian parameter ::
+
+ >>> from binaryninja import *
+ >>> br = BinaryReader(bv, Endianness.BigEndian)
+ >>> hex(br.read32())
+ '0xcffaedfeL'
+ >>>
+ """
+[docs] def __init__(self, view, endian = None):
+ self.handle = core.BNCreateBinaryReader(view.handle)
+ if endian is None:
+ core.BNSetBinaryReaderEndianness(self.handle, view.endianness)
+ else:
+ core.BNSetBinaryReaderEndianness(self.handle, endian)
+
+ def __del__(self):
+ core.BNFreeBinaryReader(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, BinaryReader):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, BinaryReader):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def endianness(self):
+ """
+ The Endianness to read data. (read/write)
+
+ :getter: returns the endianness of the reader
+ :setter: sets the endianness of the reader (BigEndian or LittleEndian)
+ :type: Endianness
+ """
+ return core.BNGetBinaryReaderEndianness(self.handle)
+
+ @endianness.setter
+ def endianness(self, value):
+ core.BNSetBinaryReaderEndianness(self.handle, value)
+
+ @property
+ def offset(self):
+ """
+ The current read offset (read/write).
+
+ :getter: returns the current internal offset
+ :setter: sets the internal offset
+ :type: int
+ """
+ return core.BNGetReaderPosition(self.handle)
+
+ @offset.setter
+ def offset(self, value):
+ core.BNSeekBinaryReader(self.handle, value)
+
+ @property
+ def eof(self):
+ """
+ Is end of file (read-only)
+
+ :getter: returns boolean, true if end of file, false otherwise
+ :type: bool
+ """
+ return core.BNIsEndOfFile(self.handle)
+
+[docs] def read(self, length):
+ """
+ ``read`` returns ``length`` bytes read from the current offset, adding ``length`` to offset.
+
+ :param int length: number of bytes to read.
+ :return: ``length`` bytes from current offset
+ :rtype: str, or None on failure
+ :Example:
+
+ >>> br.read(8)
+ '\\xcf\\xfa\\xed\\xfe\\x07\\x00\\x00\\x01'
+ >>>
+ """
+ dest = ctypes.create_string_buffer(length)
+ if not core.BNReadData(self.handle, dest, length):
+ return None
+ return dest.raw
+
+[docs] def read8(self):
+ """
+ ``read8`` returns a one byte integer from offset incrementing the offset.
+
+ :return: byte at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> br.read8()
+ 207
+ >>>
+ """
+ result = ctypes.c_ubyte()
+ if not core.BNRead8(self.handle, result):
+ return None
+ return result.value
+
+[docs] def read16(self):
+ """
+ ``read16`` returns a two byte integer from offset incrementing the offset by two, using specified endianness.
+
+ :return: a two byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read16())
+ '0xfacf'
+ >>>
+ """
+ result = ctypes.c_ushort()
+ if not core.BNRead16(self.handle, result):
+ return None
+ return result.value
+
+[docs] def read32(self):
+ """
+ ``read32`` returns a four byte integer from offset incrementing the offset by four, using specified endianness.
+
+ :return: a four byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read32())
+ '0xfeedfacfL'
+ >>>
+ """
+ result = ctypes.c_uint()
+ if not core.BNRead32(self.handle, result):
+ return None
+ return result.value
+
+[docs] def read64(self):
+ """
+ ``read64`` returns an eight byte integer from offset incrementing the offset by eight, using specified endianness.
+
+ :return: an eight byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read64())
+ '0x1000007feedfacfL'
+ >>>
+ """
+ result = ctypes.c_ulonglong()
+ if not core.BNRead64(self.handle, result):
+ return None
+ return result.value
+
+[docs] def read16le(self):
+ """
+ ``read16le`` returns a two byte little endian integer from offset incrementing the offset by two.
+
+ :return: a two byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read16le())
+ '0xfacf'
+ >>>
+ """
+ result = self.read(2)
+ if (result is None) or (len(result) != 2):
+ return None
+ return struct.unpack("<H", result)[0]
+
+[docs] def read32le(self):
+ """
+ ``read32le`` returns a four byte little endian integer from offset incrementing the offset by four.
+
+ :return: a four byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read32le())
+ '0xfeedfacf'
+ >>>
+ """
+ result = self.read(4)
+ if (result is None) or (len(result) != 4):
+ return None
+ return struct.unpack("<I", result)[0]
+
+[docs] def read64le(self):
+ """
+ ``read64le`` returns an eight byte little endian integer from offset incrementing the offset by eight.
+
+ :return: an eight byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read64le())
+ '0x1000007feedfacf'
+ >>>
+ """
+ result = self.read(8)
+ if (result is None) or (len(result) != 8):
+ return None
+ return struct.unpack("<Q", result)[0]
+
+[docs] def read16be(self):
+ """
+ ``read16be`` returns a two byte big endian integer from offset incrementing the offset by two.
+
+ :return: a two byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read16be())
+ '0xcffa'
+ >>>
+ """
+ result = self.read(2)
+ if (result is None) or (len(result) != 2):
+ return None
+ return struct.unpack(">H", result)[0]
+
+[docs] def read32be(self):
+ """
+ ``read32be`` returns a four byte big endian integer from offset incrementing the offset by four.
+
+ :return: a four byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read32be())
+ '0xcffaedfe'
+ >>>
+ """
+ result = self.read(4)
+ if (result is None) or (len(result) != 4):
+ return None
+ return struct.unpack(">I", result)[0]
+
+[docs] def read64be(self):
+ """
+ ``read64be`` returns an eight byte big endian integer from offset incrementing the offset by eight.
+
+ :return: an eight byte integer at offset.
+ :rtype: int, or None on failure
+ :Example:
+
+ >>> br.seek(0x100000000)
+ >>> hex(br.read64be())
+ '0xcffaedfe07000001L'
+ """
+ result = self.read(8)
+ if (result is None) or (len(result) != 8):
+ return None
+ return struct.unpack(">Q", result)[0]
+
+[docs] def seek(self, offset):
+ """
+ ``seek`` updates the internal offset to ``offset``.
+
+ :param int offset: offset to set the internal offset to
+ :rtype: None
+ :Example:
+
+ >>> hex(br.offset)
+ '0x100000008L'
+ >>> br.seek(0x100000000)
+ >>> hex(br.offset)
+ '0x100000000L'
+ >>>
+ """
+ core.BNSeekBinaryReader(self.handle, offset)
+
+[docs] def seek_relative(self, offset):
+ """
+ ``seek_relative`` updates the internal offset by ``offset``.
+
+ :param int offset: offset to add to the internal offset
+ :rtype: None
+ :Example:
+
+ >>> hex(br.offset)
+ '0x100000008L'
+ >>> br.seek_relative(-8)
+ >>> hex(br.offset)
+ '0x100000000L'
+ >>>
+ """
+ core.BNSeekBinaryReaderRelative(self.handle, offset)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
+[docs]class BinaryWriter(object):
+ """
+ ``class BinaryWriter`` is a convenience class for writing binary data.
+
+ BinaryWriter can be instantiated as follows and the rest of the document will start from this context ::
+
+ >>> from binaryninja import *
+ >>> bv = BinaryViewType['Mach-O'].open("/bin/ls")
+ >>> br = BinaryReader(bv)
+ >>> bw = BinaryWriter(bv)
+ >>>
+
+ Or using the optional endian parameter ::
+
+ >>> from binaryninja import *
+ >>> br = BinaryReader(bv, Endianness.BigEndian)
+ >>> bw = BinaryWriter(bv, Endianness.BigEndian)
+ >>>
+ """
+[docs] def __init__(self, view, endian = None):
+ self.handle = core.BNCreateBinaryWriter(view.handle)
+ if endian is None:
+ core.BNSetBinaryWriterEndianness(self.handle, view.endianness)
+ else:
+ core.BNSetBinaryWriterEndianness(self.handle, endian)
+
+ def __del__(self):
+ core.BNFreeBinaryWriter(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, BinaryWriter):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, BinaryWriter):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def endianness(self):
+ """
+ The Endianness of written data. (read/write)
+
+ :getter: returns the endianness of the writer
+ :setter: sets the endianness of the writer (BigEndian or LittleEndian)
+ :type: Endianness
+ """
+ return core.BNGetBinaryWriterEndianness(self.handle)
+
+ @endianness.setter
+ def endianness(self, value):
+ core.BNSetBinaryWriterEndianness(self.handle, value)
+
+ @property
+ def offset(self):
+ """
+ The current write offset (read/write).
+
+ :getter: returns the current internal offset
+ :setter: sets the internal offset
+ :type: int
+ """
+ return core.BNGetWriterPosition(self.handle)
+
+ @offset.setter
+ def offset(self, value):
+ core.BNSeekBinaryWriter(self.handle, value)
+
+[docs] def write(self, value):
+ """
+ ``write`` writes ``len(value)`` bytes to the internal offset, without regard to endianness.
+
+ :param str value: bytes to be written at current offset
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bw.write("AAAA")
+ True
+ >>> br.read(4)
+ 'AAAA'
+ >>>
+ """
+ value = str(value)
+ buf = ctypes.create_string_buffer(len(value))
+ ctypes.memmove(buf, value, len(value))
+ return core.BNWriteData(self.handle, buf, len(value))
+
+[docs] def write8(self, value):
+ """
+ ``write8`` writes the lowest order byte from the integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> bw.write8(0x42)
+ True
+ >>> br.read(1)
+ 'B'
+ >>>
+ """
+ return core.BNWrite8(self.handle, value)
+
+[docs] def write16(self, value):
+ """
+ ``write16`` writes the lowest order two bytes from the integer ``value`` to the current offset, using internal endianness.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ return core.BNWrite16(self.handle, value)
+
+[docs] def write32(self, value):
+ """
+ ``write32`` writes the lowest order four bytes from the integer ``value`` to the current offset, using internal endianness.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ return core.BNWrite32(self.handle, value)
+
+[docs] def write64(self, value):
+ """
+ ``write64`` writes the lowest order eight bytes from the integer ``value`` to the current offset, using internal endianness.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ return core.BNWrite64(self.handle, value)
+
+[docs] def write16le(self, value):
+ """
+ ``write16le`` writes the lowest order two bytes from the little endian integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ value = struct.pack("<H", value)
+ return self.write(value)
+
+[docs] def write32le(self, value):
+ """
+ ``write32le`` writes the lowest order four bytes from the little endian integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ value = struct.pack("<I", value)
+ return self.write(value)
+
+[docs] def write64le(self, value):
+ """
+ ``write64le`` writes the lowest order eight bytes from the little endian integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ value = struct.pack("<Q", value)
+ return self.write(value)
+
+[docs] def write16be(self, value):
+ """
+ ``write16be`` writes the lowest order two bytes from the big endian integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ value = struct.pack(">H", value)
+ return self.write(value)
+
+[docs] def write32be(self, value):
+ """
+ ``write32be`` writes the lowest order four bytes from the big endian integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ value = struct.pack(">I", value)
+ return self.write(value)
+
+[docs] def write64be(self, value):
+ """
+ ``write64be`` writes the lowest order eight bytes from the big endian integer ``value`` to the current offset.
+
+ :param int value: integer value to write.
+ :return: boolean True on success, False on failure.
+ :rtype: bool
+ """
+ value = struct.pack(">Q", value)
+ return self.write(value)
+
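+ # A small sketch contrasting the endian-specific writers, assuming ``bw`` and ``br`` are the
+ # BinaryWriter/BinaryReader instances from the class docstring; the offset and value are
+ # illustrative only.
+ #
+ # >>> bw.seek(0x100000000)
+ # >>> bw.write32le(0x11223344)   # stored as 44 33 22 11
+ # True
+ # >>> bw.write32be(0x11223344)   # stored as 11 22 33 44
+ # True
+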
+[docs] def seek(self, offset):
+ """
+ ``seek`` updates the internal offset to ``offset``.
+
+ :param int offset: offset to set the internal offset to
+ :rtype: None
+ :Example:
+
+ >>> hex(bw.offset)
+ '0x100000008L'
+ >>> bw.seek(0x100000000)
+ >>> hex(bw.offset)
+ '0x100000000L'
+ >>>
+ """
+ core.BNSeekBinaryWriter(self.handle, offset)
+
+[docs] def seek_relative(self, offset):
+ """
+ ``seek_relative`` updates the internal offset by ``offset``.
+
+ :param int offset: offset to add to the internal offset
+ :rtype: None
+ :Example:
+
+ >>> hex(bw.offset)
+ '0x100000008L'
+ >>> bw.seek_relative(-8)
+ >>> hex(bw.offset)
+ '0x100000000L'
+ >>>
+ """
+ core.BNSeekBinaryWriterRelative(self.handle, offset)
+
+[docs]class StructuredDataValue(object):
+[docs] def __init__(self, type, address, value):
+ self._type = type
+ self._address = address
+ self._value = value
+
+ @property
+ def type(self):
+ return self._type
+
+ @property
+ def width(self):
+ return self._type.width
+
+ @property
+ def address(self):
+ return self._address
+
+ @property
+ def value(self):
+ return self._value
+
+ @property
+ def int(self):
+ return int(self)
+
+ @property
+ def str(self):
+ return str(self)
+
+ def __int__(self):
+ if self._type.width == 1:
+ code = "B"
+ elif self._type.width == 2:
+ code = "H"
+ elif self._type.width == 4:
+ code = "I"
+ elif self._type.width == 8:
+ code = "Q"
+ else:
+ raise Exception("Could not convert to integer with width {}".format(self._type.width))
+
+ return struct.unpack(code, self._value)[0]
+
+ def __str__(self):
+ decode_str = "{}B".format(self._type.width)
+ return ' '.join(["{:02x}".format(x) for x in struct.unpack(decode_str, self._value)])
+
+ def __repr__(self):
+ return "<StructuredDataValue type:{} value:{}>".format(str(self._type), str(self))
+
+
+[docs]class StructuredDataView(object):
+ """
+ ``class StructuredDataView`` is a convenience class for reading structured binary data.
+
+ StructuredDataView can be instantiated as follows:
+
+ >>> from binaryninja import *
+ >>> bv = BinaryViewType['Mach-O'].open("/bin/ls")
+ >>> structure = "Elf64_Header"
+ >>> address = bv.start
+ >>> elf = StructuredDataView(bv, structure, address)
+ >>>
+
+ Once instantiated, members can be accessed:
+
+ >>> print("{:x}".format(elf.machine))
+ 003e
+ >>>
+
+ """
+ _structure = None
+ _structure_name = None
+ _address = 0
+ _bv = None
+ _members = OrderedDict()
+
+[docs] def __init__(self, bv, structure_name, address):
+ self._bv = bv
+ self._structure_name = structure_name
+ self._address = address
+
+ self._lookup_structure()
+ self._define_members()
+
+ def _lookup_structure(self):
+ s = self._bv.types.get(self._structure_name, None)
+ if s is None:
+ raise Exception("Could not find structure with name: {}".format(self._structure_name))
+
+ if s.type_class != TypeClass.StructureTypeClass:
+ raise Exception("{} is not a StructureTypeClass, got: {}".format(self._structure_name, s._type_class))
+
+ self._structure = s.structure
+
+ def _define_members(self):
+ for m in self._structure.members:
+ self._members[m.name] = m
+
+ def __getattr__(self, key):
+ m = self._members.get(key, None)
+ if m is None:
+ return self.__getattribute__(key)
+
+ return self[key]
+
+ def __getitem__(self, key):
+ m = self._members.get(key, None)
+ if m is None:
+ return super(StructuredDataView, self).__getitem__(key)
+
+ ty = m.type
+ offset = m.offset
+ width = ty.width
+
+ value = self._bv.read(self._address + offset, width)
+ return StructuredDataValue(ty, self._address + offset, value)
+
+ def __str__(self):
+ rv = "struct {name} 0x{addr:x} {{\n".format(name=self._structure_name, addr=self._address)
+ for k in self._members:
+ m = self._members[k]
+
+ ty = m.type
+ offset = m.offset
+
+ formatted_offset = "{:=+x}".format(offset)
+ formatted_type = "{:s} {:s}".format(str(ty), k)
+
+ value = self[k]
+ if value.width in (1, 2, 4, 8):
+ formatted_value = str.zfill("{:x}".format(value.int), value.width * 2)
+ else:
+ formatted_value = str(value)
+
+ rv += "\t{:>6s} {:40s} = {:30s}\n".format(formatted_offset, formatted_type, formatted_value)
+
+ rv += "}\n"
+
+ return rv
+
+ def __repr__(self):
+ return "<StructuredDataView type:{} size:{:#x} address:{:#x}>".format(self._structure_name,
+ self._structure.width, self._address)
+
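+ # A brief member-access sketch, building on the ``elf`` StructuredDataView from the class
+ # docstring above; the member name is illustrative and depends on the platform's type library.
+ #
+ # >>> m = elf["machine"]          # same lookup as elf.machine
+ # >>> m.int                       # raw bytes decoded as an integer
+ # >>> m.address, m.width          # where the member lives and how wide it is
+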
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+import ctypes
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja import log
+from binaryninja.enums import VariableSourceType
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class CallingConvention(object):
+ name = None
+ caller_saved_regs = []
+ callee_saved_regs = []
+ int_arg_regs = []
+ float_arg_regs = []
+ arg_regs_share_index = False
+ stack_reserved_for_arg_regs = False
+ stack_adjusted_on_return = False
+ int_return_reg = None
+ high_int_return_reg = None
+ float_return_reg = None
+ global_pointer_reg = None
+ implicitly_defined_regs = []
+
+ _registered_calling_conventions = []
+
+[docs] def __init__(self, arch=None, name=None, handle=None, confidence=binaryninja.types.max_confidence):
+ if handle is None:
+ if arch is None or name is None:
+ self.handle = None
+ raise ValueError("Must specify either handle or architecture and name")
+ self.arch = arch
+ self._pending_reg_lists = {}
+ self._cb = core.BNCustomCallingConvention()
+ self._cb.context = 0
+ self._cb.getCallerSavedRegisters = self._cb.getCallerSavedRegisters.__class__(self._get_caller_saved_regs)
+ self._cb.getCalleeSavedRegisters = self._cb.getCalleeSavedRegisters.__class__(self._get_callee_saved_regs)
+ self._cb.getIntegerArgumentRegisters = self._cb.getIntegerArgumentRegisters.__class__(self._get_int_arg_regs)
+ self._cb.getFloatArgumentRegisters = self._cb.getFloatArgumentRegisters.__class__(self._get_float_arg_regs)
+ self._cb.freeRegisterList = self._cb.freeRegisterList.__class__(self._free_register_list)
+ self._cb.areArgumentRegistersSharedIndex = self._cb.areArgumentRegistersSharedIndex.__class__(self._arg_regs_share_index)
+ self._cb.isStackReservedForArgumentRegisters = self._cb.isStackReservedForArgumentRegisters.__class__(self._stack_reserved_for_arg_regs)
+ self._cb.isStackAdjustedOnReturn = self._cb.isStackAdjustedOnReturn.__class__(self._stack_adjusted_on_return)
+ self._cb.getIntegerReturnValueRegister = self._cb.getIntegerReturnValueRegister.__class__(self._get_int_return_reg)
+ self._cb.getHighIntegerReturnValueRegister = self._cb.getHighIntegerReturnValueRegister.__class__(self._get_high_int_return_reg)
+ self._cb.getFloatReturnValueRegister = self._cb.getFloatReturnValueRegister.__class__(self._get_float_return_reg)
+ self._cb.getGlobalPointerRegister = self._cb.getGlobalPointerRegister.__class__(self._get_global_pointer_reg)
+ self._cb.getImplicitlyDefinedRegisters = self._cb.getImplicitlyDefinedRegisters.__class__(self._get_implicitly_defined_regs)
+ self._cb.getIncomingRegisterValue = self._cb.getIncomingRegisterValue.__class__(self._get_incoming_reg_value)
+ self._cb.getIncomingFlagValue = self._cb.getIncomingFlagValue.__class__(self._get_incoming_flag_value)
+ self._cb.getIncomingVariableForParameterVariable = self._cb.getIncomingVariableForParameterVariable.__class__(self._get_incoming_var_for_parameter_var)
+ self._cb.getParameterVariableForIncomingVariable = self._cb.getParameterVariableForIncomingVariable.__class__(self._get_parameter_var_for_incoming_var)
+ self.handle = core.BNCreateCallingConvention(arch.handle, name, self._cb)
+ self.__class__._registered_calling_conventions.append(self)
+ else:
+ self.handle = handle
+ self.arch = binaryninja.architecture.CoreArchitecture._from_cache(core.BNGetCallingConventionArchitecture(self.handle))
+ self.__dict__["name"] = core.BNGetCallingConventionName(self.handle)
+ self.__dict__["arg_regs_share_index"] = core.BNAreArgumentRegistersSharedIndex(self.handle)
+ self.__dict__["stack_reserved_for_arg_regs"] = core.BNIsStackReservedForArgumentRegisters(self.handle)
+ self.__dict__["stack_adjusted_on_return"] = core.BNIsStackAdjustedOnReturn(self.handle)
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetCallerSavedRegisters(self.handle, count)
+ result = []
+ arch = self.arch
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs, count.value)
+ self.__dict__["caller_saved_regs"] = result
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetCalleeSavedRegisters(self.handle, count)
+ result = []
+ arch = self.arch
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs, count.value)
+ self.__dict__["callee_saved_regs"] = result
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetIntegerArgumentRegisters(self.handle, count)
+ result = []
+ arch = self.arch
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs, count.value)
+ self.__dict__["int_arg_regs"] = result
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetFloatArgumentRegisters(self.handle, count)
+ result = []
+ arch = self.arch
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs, count.value)
+ self.__dict__["float_arg_regs"] = result
+
+ reg = core.BNGetIntegerReturnValueRegister(self.handle)
+ if reg == 0xffffffff:
+ self.__dict__["int_return_reg"] = None
+ else:
+ self.__dict__["int_return_reg"] = self.arch.get_reg_name(reg)
+
+ reg = core.BNGetHighIntegerReturnValueRegister(self.handle)
+ if reg == 0xffffffff:
+ self.__dict__["high_int_return_reg"] = None
+ else:
+ self.__dict__["high_int_return_reg"] = self.arch.get_reg_name(reg)
+
+ reg = core.BNGetFloatReturnValueRegister(self.handle)
+ if reg == 0xffffffff:
+ self.__dict__["float_return_reg"] = None
+ else:
+ self.__dict__["float_return_reg"] = self.arch.get_reg_name(reg)
+
+ reg = core.BNGetGlobalPointerRegister(self.handle)
+ if reg == 0xffffffff:
+ self.__dict__["global_pointer_reg"] = None
+ else:
+ self.__dict__["global_pointer_reg"] = self.arch.get_reg_name(reg)
+
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetImplicitlyDefinedRegisters(self.handle, count)
+ result = []
+ arch = self.arch
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs, count.value)
+ self.__dict__["implicitly_defined_regs"] = result
+
+ self.confidence = confidence
+
+ def __del__(self):
+ if self.handle is not None:
+ core.BNFreeCallingConvention(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, CallingConvention):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, CallingConvention):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ def _get_caller_saved_regs(self, ctxt, count):
+ try:
+ regs = self.__class__.caller_saved_regs
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = self.arch.regs[regs[i]].index
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_callee_saved_regs(self, ctxt, count):
+ try:
+ regs = self.__class__.callee_saved_regs
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = self.arch.regs[regs[i]].index
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_int_arg_regs(self, ctxt, count):
+ try:
+ regs = self.__class__.int_arg_regs
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = self.arch.regs[regs[i]].index
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_float_arg_regs(self, ctxt, count):
+ try:
+ regs = self.__class__.float_arg_regs
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = self.arch.regs[regs[i]].index
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _free_register_list(self, ctxt, regs):
+ try:
+ buf = ctypes.cast(regs, ctypes.c_void_p)
+ if buf.value not in self._pending_reg_lists:
+ raise ValueError("freeing register list that wasn't allocated")
+ del self._pending_reg_lists[buf.value]
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _arg_regs_share_index(self, ctxt):
+ try:
+ return self.__class__.arg_regs_share_index
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _stack_reserved_for_arg_regs(self, ctxt):
+ try:
+ return self.__class__.stack_reserved_for_arg_regs
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _stack_adjusted_on_return(self, ctxt):
+ try:
+ return self.__class__.stack_adjusted_on_return
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_int_return_reg(self, ctxt):
+ try:
+ return self.arch.regs[self.__class__.int_return_reg].index
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_high_int_return_reg(self, ctxt):
+ try:
+ if self.__class__.high_int_return_reg is None:
+ return 0xffffffff
+ return self.arch.regs[self.__class__.high_int_return_reg].index
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_float_return_reg(self, ctxt):
+ try:
+ if self.__class__.float_return_reg is None:
+ return 0xffffffff
+            return self.arch.regs[self.__class__.float_return_reg].index
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_global_pointer_reg(self, ctxt):
+ try:
+ if self.__class__.global_pointer_reg is None:
+ return 0xffffffff
+ return self.arch.regs[self.__class__.global_pointer_reg].index
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _get_implicitly_defined_regs(self, ctxt, count):
+ try:
+ regs = self.__class__.implicitly_defined_regs
+ count[0] = len(regs)
+ reg_buf = (ctypes.c_uint * len(regs))()
+ for i in range(0, len(regs)):
+ reg_buf[i] = self.arch.regs[regs[i]].index
+ result = ctypes.cast(reg_buf, ctypes.c_void_p)
+ self._pending_reg_lists[result.value] = (result, reg_buf)
+ return result.value
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _get_incoming_reg_value(self, ctxt, reg, func, result):
+ try:
+ func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
+ reg_name = self.arch.get_reg_name(reg)
+ api_obj = self.perform_get_incoming_reg_value(reg_name, func_obj)._to_api_object()
+ except:
+ log.log_error(traceback.format_exc())
+ api_obj = binaryninja.function.RegisterValue()._to_api_object()
+ result[0].state = api_obj.state
+ result[0].value = api_obj.value
+
+ def _get_incoming_flag_value(self, ctxt, reg, func, result):
+ try:
+ func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
+ reg_name = self.arch.get_reg_name(reg)
+ api_obj = self.perform_get_incoming_flag_value(reg_name, func_obj)._to_api_object()
+ except:
+ log.log_error(traceback.format_exc())
+ api_obj = binaryninja.function.RegisterValue()._to_api_object()
+ result[0].state = api_obj.state
+ result[0].value = api_obj.value
+
+ def _get_incoming_var_for_parameter_var(self, ctxt, in_var, func, result):
+ try:
+ if func is None:
+ func_obj = None
+ else:
+ func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
+ in_var_obj = binaryninja.function.Variable(func_obj, in_var[0].type, in_var[0].index, in_var[0].storage)
+ out_var = self.perform_get_incoming_var_for_parameter_var(in_var_obj, func_obj)
+ result[0].type = out_var.source_type
+ result[0].index = out_var.index
+ result[0].storage = out_var.storage
+ except:
+ log.log_error(traceback.format_exc())
+ result[0].type = in_var[0].type
+ result[0].index = in_var[0].index
+ result[0].storage = in_var[0].storage
+
+ def _get_parameter_var_for_incoming_var(self, ctxt, in_var, func, result):
+ try:
+ if func is None:
+ func_obj = None
+ else:
+ func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
+ in_var_obj = binaryninja.function.Variable(func_obj, in_var[0].type, in_var[0].index, in_var[0].storage)
+ out_var = self.perform_get_parameter_var_for_incoming_var(in_var_obj, func_obj)
+ result[0].type = out_var.source_type
+ result[0].index = out_var.index
+ result[0].storage = out_var.storage
+ except:
+ log.log_error(traceback.format_exc())
+ result[0].type = in_var[0].type
+ result[0].index = in_var[0].index
+ result[0].storage = in_var[0].storage
+
+ def __repr__(self):
+ return "<calling convention: %s %s>" % (self.arch.name, self.name)
+
+ def __str__(self):
+ return self.name
+
+[docs] def perform_get_incoming_reg_value(self, reg, func):
+ reg_stack = self.arch.get_reg_stack_for_reg(reg)
+ if reg_stack is not None:
+ if reg == self.arch.reg_stacks[reg_stack].stack_top_reg:
+ return binaryninja.function.RegisterValue.constant(0)
+ return binaryninja.function.RegisterValue()
+
+[docs] def perform_get_incoming_flag_value(self, reg, func):
+ return binaryninja.function.RegisterValue()
+
+[docs] def perform_get_incoming_var_for_parameter_var(self, in_var, func):
+ in_buf = core.BNVariable()
+ in_buf.type = in_var.source_type
+ in_buf.index = in_var.index
+ in_buf.storage = in_var.storage
+ out_var = core.BNGetDefaultIncomingVariableForParameterVariable(self.handle, in_buf)
+ name = None
+ if (func is not None) and (out_var.type == VariableSourceType.RegisterVariableSourceType):
+ name = func.arch.get_reg_name(out_var.storage)
+ return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage, name)
+
+[docs] def perform_get_parameter_var_for_incoming_var(self, in_var, func):
+ in_buf = core.BNVariable()
+ in_buf.type = in_var.source_type
+ in_buf.index = in_var.index
+ in_buf.storage = in_var.storage
+ out_var = core.BNGetDefaultParameterVariableForIncomingVariable(self.handle, in_buf)
+ return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage)
+
+[docs] def with_confidence(self, confidence):
+ return CallingConvention(self.arch, handle = core.BNNewCallingConventionReference(self.handle),
+ confidence = confidence)
+
+[docs] def get_incoming_reg_value(self, reg, func):
+ reg_num = self.arch.get_reg_index(reg)
+ func_handle = None
+ if func is not None:
+ func_handle = func.handle
+ return binaryninja.function.RegisterValue(self.arch, core.BNGetIncomingRegisterValue(self.handle, reg_num, func_handle))
+
+[docs] def get_incoming_flag_value(self, flag, func):
+ reg_num = self.arch.get_flag_index(flag)
+ func_handle = None
+ if func is not None:
+ func_handle = func.handle
+ return binaryninja.function.RegisterValue(self.arch, core.BNGetIncomingFlagValue(self.handle, reg_num, func_handle))
+
+[docs] def get_incoming_var_for_parameter_var(self, in_var, func):
+ in_buf = core.BNVariable()
+ in_buf.type = in_var.source_type
+ in_buf.index = in_var.index
+ in_buf.storage = in_var.storage
+ if func is None:
+ func_obj = None
+ else:
+ func_obj = func.handle
+ out_var = core.BNGetIncomingVariableForParameterVariable(self.handle, in_buf, func_obj)
+ name = None
+ if (func is not None) and (out_var.type == VariableSourceType.RegisterVariableSourceType):
+ name = func.arch.get_reg_name(out_var.storage)
+ return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage, name)
+
+[docs] def get_parameter_var_for_incoming_var(self, in_var, func):
+ in_buf = core.BNVariable()
+ in_buf.type = in_var.source_type
+ in_buf.index = in_var.index
+ in_buf.storage = in_var.storage
+ if func is None:
+ func_obj = None
+ else:
+ func_obj = func.handle
+ out_var = core.BNGetParameterVariableForIncomingVariable(self.handle, in_buf, func_obj)
+ return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage)
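+
+# A minimal sketch (not part of the original source) of defining and
+# registering a custom calling convention. The register names are
+# placeholders for whatever the target Architecture actually defines, and
+# `arch.register_calling_convention` is assumed to be available on the
+# Architecture object being extended.
+#
+#     class MyCallingConvention(CallingConvention):
+#         name = "custom"
+#         int_arg_regs = ["r0", "r1", "r2", "r3"]
+#         int_return_reg = "r0"
+#         caller_saved_regs = ["r0", "r1", "r2", "r3", "r12"]
+#
+#     arch = binaryninja.Architecture["armv7"]
+#     arch.register_calling_convention(MyCallingConvention(arch, "custom"))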
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+
+# 2-3 compatibility
+from binaryninja import pyNativeStr
+from binaryninja import long
+
+
+[docs]class DataBuffer(object):
+[docs] def __init__(self, contents="", handle=None):
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNDataBuffer)
+ elif isinstance(contents, int) or isinstance(contents, long):
+ self.handle = core.BNCreateDataBuffer(None, contents)
+ elif isinstance(contents, DataBuffer):
+ self.handle = core.BNDuplicateDataBuffer(contents.handle)
+ else:
+ self.handle = core.BNCreateDataBuffer(contents, len(contents))
+
+ def __del__(self):
+ core.BNFreeDataBuffer(self.handle)
+
+ def __len__(self):
+ return int(core.BNGetDataBufferLength(self.handle))
+
+ def __getitem__(self, i):
+ if isinstance(i, tuple):
+ result = ""
+ source = bytes(self)
+ for s in i:
+ result += source[s]
+ return result
+ elif isinstance(i, slice):
+ if i.step is not None:
+ i = i.indices(len(self))
+ start = i[0]
+ stop = i[1]
+ if stop <= start:
+ return ""
+ buf = ctypes.create_string_buffer(stop - start)
+ ctypes.memmove(buf, core.BNGetDataBufferContentsAt(self.handle, start), stop - start)
+ return buf.raw
+ else:
+ return bytes(self)[i]
+ elif i < 0:
+ if i >= -len(self):
+ return chr(core.BNGetDataBufferByte(self.handle, int(len(self) + i)))
+ raise IndexError("index out of range")
+ elif i < len(self):
+ return chr(core.BNGetDataBufferByte(self.handle, int(i)))
+ else:
+ raise IndexError("index out of range")
+
+ def __setitem__(self, i, value):
+ if isinstance(i, slice):
+ if i.step is not None:
+ raise IndexError("step not supported on assignment")
+ i = i.indices(len(self))
+ start = i[0]
+ stop = i[1]
+ if stop < start:
+ stop = start
+ if len(value) != (stop - start):
+ data = bytes(self)
+ data = data[0:start] + value + data[stop:]
+ core.BNSetDataBufferContents(self.handle, data, len(data))
+ else:
+ value = str(value)
+ buf = ctypes.create_string_buffer(value)
+ ctypes.memmove(core.BNGetDataBufferContentsAt(self.handle, start), buf, len(value))
+ elif i < 0:
+ if i >= -len(self):
+ if len(value) != 1:
+ raise ValueError("expected single byte for assignment")
+ value = str(value)
+ buf = ctypes.create_string_buffer(value)
+ ctypes.memmove(core.BNGetDataBufferContentsAt(self.handle, int(len(self) + i)), buf, 1)
+ else:
+ raise IndexError("index out of range")
+ elif i < len(self):
+ if len(value) != 1:
+ raise ValueError("expected single byte for assignment")
+ value = str(value)
+ buf = ctypes.create_string_buffer(value)
+ ctypes.memmove(core.BNGetDataBufferContentsAt(self.handle, int(i)), buf, 1)
+ else:
+ raise IndexError("index out of range")
+
+ def __str__(self):
+ buf = ctypes.create_string_buffer(len(self))
+ ctypes.memmove(buf, core.BNGetDataBufferContents(self.handle), len(self))
+ return pyNativeStr(buf.raw)
+
+ def __bytes__(self):
+ buf = ctypes.create_string_buffer(len(self))
+ ctypes.memmove(buf, core.BNGetDataBufferContents(self.handle), len(self))
+ return buf.raw
+
+
+
+
+
+
+
+
+
+[docs] def zlib_compress(self):
+ buf = core.BNZlibCompress(self.handle)
+ if buf is None:
+ return None
+ return DataBuffer(handle = buf)
+
+[docs] def zlib_decompress(self):
+ buf = core.BNZlibDecompress(self.handle)
+ if buf is None:
+ return None
+ return DataBuffer(handle = buf)
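+
+# A usage sketch (not part of the original source): DataBuffer wraps a
+# core-managed byte buffer and supports len(), indexing/slicing, and the
+# zlib helpers above (which return None on failure).
+#
+#     buf = DataBuffer(b"hello world" * 100)
+#     packed = buf.zlib_compress()
+#     restored = packed.zlib_decompress()    # round-trips to the original bytes
+#     print(len(buf), len(packed), len(restored))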
+
+
+
+
+
+
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import traceback
+import ctypes
+
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja.filemetadata import FileMetadata
+from binaryninja.binaryview import BinaryView
+from binaryninja.function import (DisassemblyTextLine, InstructionTextToken)
+from binaryninja.enums import InstructionTextTokenType, TypeClass, HighlightStandardColor
+from binaryninja.log import log_error
+from binaryninja.types import Type
+from binaryninja import highlight
+
+[docs]class DataRenderer(object):
+ _registered_renderers = []
+
+ """
+ DataRenderer objects tell the Linear View how to render specific types.
+
+    The `perform_is_valid_for_data` method returns a boolean indicating whether your derived class
+    is able to render the type, given the `addr` and `context`. The `context` is a list of Type
+    objects representing the chain of nested objects that is being displayed.
+
+    The `perform_get_lines_for_data` method returns a list of `DisassemblyTextLine` objects, each
+    representing a single line of Linear View output. The `prefix` variable is a list of
+    `InstructionTextToken` objects that have already been generated by other `DataRenderer` instances.
+
+    After defining the `DataRenderer` subclass, register it with the core by calling either
+    `register_type_specific` or `register_generic`. A "generic" renderer can be overridden by a
+    "type specific" renderer. For instance, there is a generic struct renderer that renders any struct
+    that hasn't been explicitly overridden by a "type specific" renderer.
+
+    The example below creates a data renderer that overrides the default display for `struct BAR`.
+
+ class BarDataRenderer(DataRenderer):
+ def __init__(self):
+ DataRenderer.__init__(self)
+
+ def perform_is_valid_for_data(self, ctxt, view, addr, type, context):
+ return DataRenderer.is_type_of_struct_name(type, "BAR", context)
+
+ def perform_get_lines_for_data(self, ctxt, view, addr, type, prefix, width, context):
+ prefix.append(InstructionTextToken(InstructionTextTokenType.TextToken, "I'm in ur BAR"))
+ return [DisassemblyTextLine(prefix, addr)]
+
+ def __del__(self):
+ pass
+
+ BarDataRenderer().register_type_specific()
+ """
+[docs] def __init__(self, context=None):
+ self._cb = core.BNCustomDataRenderer()
+ self._cb.context = context
+ self._cb.freeObject = self._cb.freeObject.__class__(self._free_object)
+ self._cb.isValidForData = self._cb.isValidForData.__class__(self._is_valid_for_data)
+ self._cb.getLinesForData = self._cb.getLinesForData.__class__(self._get_lines_for_data)
+ self.handle = core.BNCreateDataRenderer(self._cb)
+
+[docs] @classmethod
+ def is_type_of_struct_name(cls, type, name, context):
+ return (type.type_class == TypeClass.StructureTypeClass and len(context) > 0
+ and context[0].type_class == TypeClass.NamedTypeReferenceClass and
+ context[0].named_type_reference.name == name)
+
+[docs] def register_type_specific(self):
+ core.BNRegisterTypeSpecificDataRenderer(core.BNGetDataRendererContainer(), self.handle)
+ self.__class__._registered_renderers.append(self)
+
+[docs] def register_generic(self):
+ core.BNRegisterGenericDataRenderer(core.BNGetDataRendererContainer(), self.handle)
+ self.__class__._registered_renderers.append(self)
+
+ def _free_object(self, ctxt):
+ try:
+ self.perform_free_object(ctxt)
+ except:
+ log_error(traceback.format_exc())
+
+ def _is_valid_for_data(self, ctxt, view, addr, type, context, ctxCount):
+ try:
+ file_metadata = FileMetadata(handle=core.BNGetFileForView(view))
+ view = BinaryView(file_metadata=file_metadata, handle=core.BNNewViewReference(view))
+ type = Type(handle=core.BNNewTypeReference(type))
+ pycontext = []
+ for i in range(0, ctxCount):
+ pycontext.append(Type(core.BNNewTypeReference(context[i])))
+ return self.perform_is_valid_for_data(ctxt, view, addr, type, pycontext)
+ except:
+ log_error(traceback.format_exc())
+ return False
+
+ def _get_lines_for_data(self, ctxt, view, addr, type, prefix, prefixCount, width, count, typeCtx, ctxCount):
+ try:
+ file_metadata = FileMetadata(handle=core.BNGetFileForView(view))
+ view = BinaryView(file_metadata=file_metadata, handle=core.BNNewViewReference(view))
+ type = Type(handle=core.BNNewTypeReference(type))
+
+ prefixTokens = InstructionTextToken.get_instruction_lines(prefix, prefixCount)
+ pycontext = []
+ for i in range(ctxCount):
+ pycontext.append(Type(core.BNNewTypeReference(typeCtx[i])))
+
+ result = self.perform_get_lines_for_data(ctxt, view, addr, type, prefixTokens, width, pycontext)
+
+ count[0] = len(result)
+ line_buf = (core.BNDisassemblyTextLine * len(result))()
+ for i in range(len(result)):
+ line = result[i]
+ color = line.highlight
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ line_buf[i].highlight = color._get_core_struct()
+ if line.address is None:
+ if len(line.tokens) > 0:
+ line_buf[i].addr = line.tokens[0].address
+ else:
+ line_buf[i].addr = 0
+ else:
+ line_buf[i].addr = line.address
+ if line.il_instruction is not None:
+ line_buf[i].instrIndex = line.il_instruction.instr_index
+ else:
+ line_buf[i].instrIndex = 0xffffffffffffffff
+
+ line_buf[i].count = len(line.tokens)
+ line_buf[i].tokens = InstructionTextToken.get_instruction_lines(line.tokens)
+
+ return ctypes.cast(line_buf, ctypes.c_void_p).value
+ except:
+ log_error(traceback.format_exc())
+ return None
+
+
+
+
+
+[docs] def perform_get_lines_for_data(self, ctxt, view, addr, type, prefix, width, context):
+ return []
+
+ def __del__(self):
+ pass
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja import types
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import pyNativeStr
+
+
+[docs]def get_qualified_name(names):
+ """
+ ``get_qualified_name`` gets a qualified name for the provided name list.
+
+ :param list(str) names: name list to qualify
+ :return: a qualified name
+ :rtype: str
+ :Example:
+
+ >>> type, name = demangle_ms(Architecture["x86_64"], "?testf@Foobar@@SA?AW4foo@1@W421@@Z")
+ >>> get_qualified_name(name)
+ 'Foobar::testf'
+ >>>
+ """
+ return "::".join(names)
+
+
+[docs]def demangle_ms(arch, mangled_name):
+ """
+ ``demangle_ms`` demangles a mangled Microsoft Visual Studio C++ name to a Type object.
+
+ :param Architecture arch: Architecture for the symbol. Required for pointer and integer sizes.
+ :param str mangled_name: a mangled Microsoft Visual Studio C++ name
+    :return: a tuple of (Type, demangled_name), or (None, mangled_name) on error
+ :rtype: Tuple
+ :Example:
+
+ >>> demangle_ms(Architecture["x86_64"], "?testf@Foobar@@SA?AW4foo@1@W421@@Z")
+ (<type: public: static enum Foobar::foo __cdecl (enum Foobar::foo)>, ['Foobar', 'testf'])
+ >>>
+ """
+ handle = ctypes.POINTER(core.BNType)()
+ outName = ctypes.POINTER(ctypes.c_char_p)()
+ outSize = ctypes.c_ulonglong()
+ names = []
+ if core.BNDemangleMS(arch.handle, mangled_name, ctypes.byref(handle), ctypes.byref(outName), ctypes.byref(outSize)):
+ for i in range(outSize.value):
+ names.append(pyNativeStr(outName[i]))
+ core.BNFreeDemangledName(ctypes.byref(outName), outSize.value)
+ return (types.Type(handle), names)
+ return (None, mangled_name)
+
+
+[docs]def demangle_gnu3(arch, mangled_name):
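+    """
+    ``demangle_gnu3`` demangles a mangled GNU v3 (Itanium C++ ABI) name to a Type object.
+
+    :param Architecture arch: Architecture for the symbol. Required for pointer and integer sizes.
+    :param str mangled_name: a mangled GNU v3 C++ name
+    :return: a tuple of (Type, demangled_name), (None, demangled_name) if only the name could be
+        recovered, or (None, mangled_name) on error
+    :rtype: Tuple
+    """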
+ handle = ctypes.POINTER(core.BNType)()
+ outName = ctypes.POINTER(ctypes.c_char_p)()
+ outSize = ctypes.c_ulonglong()
+ names = []
+ if core.BNDemangleGNU3(arch.handle, mangled_name, ctypes.byref(handle), ctypes.byref(outName), ctypes.byref(outSize)):
+ for i in range(outSize.value):
+ names.append(pyNativeStr(outName[i]))
+ core.BNFreeDemangledName(ctypes.byref(outName), outSize.value)
+ if not handle:
+ return (None, names)
+ return (types.Type(handle), names)
+ return (None, mangled_name)
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import abc
+import ctypes
+import sys
+import traceback
+
+# Binary Ninja Components
+import binaryninja._binaryninjacore as core
+
+import binaryninja
+from binaryninja.settings import Settings
+from binaryninja import with_metaclass
+from binaryninja import startup
+from binaryninja import log
+
+# 2-3 compatibility
+from binaryninja import pyNativeStr
+from binaryninja import range
+
+
+[docs]class DownloadInstance(object):
+ _registered_instances = []
+[docs] def __init__(self, provider, handle = None):
+ if handle is None:
+ self._cb = core.BNDownloadInstanceCallbacks()
+ self._cb.context = 0
+ self._cb.destroyInstance = self._cb.destroyInstance.__class__(self._destroy_instance)
+ self._cb.performRequest = self._cb.performRequest.__class__(self._perform_request)
+ self.handle = core.BNInitDownloadInstance(provider.handle, self._cb)
+ self.__class__._registered_instances.append(self)
+ else:
+ self.handle = core.handle_of_type(handle, core.BNDownloadInstance)
+ self._must_free = handle is not None
+
+ def __del__(self):
+ if self._must_free:
+ core.BNFreeDownloadInstance(self.handle)
+
+ def _destroy_instance(self, ctxt):
+ try:
+ if self in self.__class__._registered_instances:
+ self.__class__._registered_instances.remove(self)
+ self.perform_destroy_instance()
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _perform_request(self, ctxt, url):
+ try:
+ return self.perform_request(url)
+ except:
+ log.log_error(traceback.format_exc())
+ return -1
+
+
+
+ @abc.abstractmethod
+ def perform_request(self, ctxt, url):
+ raise NotImplementedError
+
+[docs] def perform_request(self, url, callbacks):
+ return core.BNPerformDownloadRequest(self.handle, url, callbacks)
+
+
+class _DownloadProviderMetaclass(type):
+ @property
+ def list(self):
+ """List all DownloadProvider types (read-only)"""
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ types = core.BNGetDownloadProviderList(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(DownloadProvider(types[i]))
+ core.BNFreeDownloadProviderList(types)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ types = core.BNGetDownloadProviderList(count)
+ try:
+ for i in range(0, count.value):
+ yield DownloadProvider(types[i])
+ finally:
+ core.BNFreeDownloadProviderList(types)
+
+ def __getitem__(self, value):
+ binaryninja._init_plugins()
+ provider = core.BNGetDownloadProviderByName(str(value))
+ if provider is None:
+ raise KeyError("'%s' is not a valid download provider" % str(value))
+ return DownloadProvider(provider)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
+[docs]class DownloadProvider(with_metaclass(_DownloadProviderMetaclass, object)):
+ name = None
+ instance_class = None
+ _registered_providers = []
+
+[docs] def __init__(self, handle = None):
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNDownloadProvider)
+ self.__dict__["name"] = core.BNGetDownloadProviderName(handle)
+
+[docs] def register(self):
+ self._cb = core.BNDownloadProviderCallbacks()
+ self._cb.context = 0
+ self._cb.createInstance = self._cb.createInstance.__class__(self._create_instance)
+ self.handle = core.BNRegisterDownloadProvider(self.__class__.name, self._cb)
+ self.__class__._registered_providers.append(self)
+
+ def _create_instance(self, ctxt):
+ try:
+ result = self.__class__.instance_class(self)
+ if result is None:
+ return None
+ return ctypes.cast(core.BNNewDownloadInstanceReference(result.handle), ctypes.c_void_p).value
+ except:
+ log.log_error(traceback.format_exc())
+ return None
+
+[docs] def create_instance(self):
+ result = core.BNCreateDownloadProviderInstance(self.handle)
+ if result is None:
+ return None
+ return DownloadInstance(self, handle = result)
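+
+# A usage sketch (not part of the original source): providers are looked up
+# through the metaclass helpers above, and each provider hands out
+# DownloadInstance objects.
+#
+#     for provider in DownloadProvider.list:
+#         print(provider.name)
+#     instance = DownloadProvider["PythonDownloadProvider"].create_instance()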
+
+
+if (sys.platform != "win32") and (sys.version_info >= (2, 7, 9)):
+ try:
+ from urllib.request import urlopen, build_opener, install_opener, ProxyHandler
+ from urllib.error import URLError
+ except ImportError:
+ from urllib2 import urlopen, build_opener, install_opener, ProxyHandler, URLError
+
+ class PythonDownloadInstance(DownloadInstance):
+ def __init__(self, provider):
+ super(PythonDownloadInstance, self).__init__(provider)
+
+ @abc.abstractmethod
+ def perform_destroy_instance(self):
+ pass
+
+ @abc.abstractmethod
+ def perform_request(self, url):
+ try:
+ proxy_setting = Settings().get_string('downloadClient.httpsProxy')
+ if proxy_setting:
+ opener = build_opener(ProxyHandler({'https': proxy_setting}))
+ install_opener(opener)
+
+ r = urlopen(pyNativeStr(url))
+ total_size = int(r.headers.get('content-length', 0))
+ bytes_sent = 0
+ while True:
+ data = r.read(4096)
+ if not data:
+ break
+ raw_bytes = (ctypes.c_ubyte * len(data)).from_buffer_copy(data)
+ bytes_wrote = core.BNWriteDataForDownloadInstance(self.handle, raw_bytes, len(raw_bytes))
+ if bytes_wrote != len(raw_bytes):
+ core.BNSetErrorForDownloadInstance(self.handle, "Bytes written mismatch!")
+ return -1
+ bytes_sent = bytes_sent + bytes_wrote
+ continue_download = core.BNNotifyProgressForDownloadInstance(self.handle, bytes_sent, total_size)
+ if continue_download is False:
+ core.BNSetErrorForDownloadInstance(self.handle, "Download aborted!")
+ return -1
+
+ if not bytes_sent:
+ core.BNSetErrorForDownloadInstance(self.handle, "Received no data!")
+ return -1
+
+ except URLError as e:
+ core.BNSetErrorForDownloadInstance(self.handle, e.__class__.__name__)
+ return -1
+ except:
+ core.BNSetErrorForDownloadInstance(self.handle, "Unknown Exception!")
+ log.log_error(traceback.format_exc())
+ return -1
+
+ return 0
+
+ class PythonDownloadProvider(DownloadProvider):
+ name = "PythonDownloadProvider"
+ instance_class = PythonDownloadInstance
+
+ PythonDownloadProvider().register()
+else:
+ try:
+ import requests
+ if sys.platform != "win32":
+ from requests import pyopenssl
+[docs] class PythonDownloadInstance(DownloadInstance):
+
+        def __init__(self, provider):
+            super(PythonDownloadInstance, self).__init__(provider)
+
+        @abc.abstractmethod
+        def perform_destroy_instance(self):
+            pass
+
+[docs] @abc.abstractmethod
+ def perform_request(self, url):
+ try:
+ proxy_setting = Settings().get_string('downloadClient.httpsProxy')
+ if proxy_setting:
+ proxies = {"https": proxy_setting}
+ else:
+ proxies = None
+
+ r = requests.get(pyNativeStr(url), proxies=proxies)
+ if not r.ok:
+ core.BNSetErrorForDownloadInstance(self.handle, "Received error from server")
+ return -1
+ data = r.content
+ if len(data) == 0:
+ core.BNSetErrorForDownloadInstance(self.handle, "No data received from server!")
+ return -1
+ raw_bytes = (ctypes.c_ubyte * len(data)).from_buffer_copy(data)
+ bytes_wrote = core.BNWriteDataForDownloadInstance(self.handle, raw_bytes, len(raw_bytes))
+ if bytes_wrote != len(raw_bytes):
+ core.BNSetErrorForDownloadInstance(self.handle, "Bytes written mismatch!")
+ return -1
+ continue_download = core.BNNotifyProgressForDownloadInstance(self.handle, bytes_wrote, bytes_wrote)
+ if continue_download is False:
+ core.BNSetErrorForDownloadInstance(self.handle, "Download aborted!")
+ return -1
+ except requests.RequestException as e:
+ core.BNSetErrorForDownloadInstance(self.handle, e.__class__.__name__)
+ return -1
+ except:
+ core.BNSetErrorForDownloadInstance(self.handle, "Unknown Exception!")
+ log.log_error(traceback.format_exc())
+ return -1
+
+ return 0
+
+[docs] class PythonDownloadProvider(DownloadProvider):
+ name = "PythonDownloadProvider"
+ instance_class = PythonDownloadInstance
+
+ PythonDownloadProvider().register()
+ except ImportError:
+ if sys.platform == "win32":
+ log.log_error("The pip requests package is required for network connectivity!")
+ log.log_error("Please install the requests package into the selected Python environment:")
+ log.log_error(" python -m pip install requests")
+ else:
+ log.log_error("On Python versions below 2.7.9, the pip requests[security] package is required for network connectivity!")
+ log.log_error("On an Ubuntu 14.04 install, the following three commands are sufficient to enable networking for the current user:")
+ log.log_error(" sudo apt install python-pip")
+ log.log_error(" python -m pip install pip --upgrade --user")
+ log.log_error(" python -m pip install requests[security] --upgrade --user")
+
+
+import enum
+
+[docs]class ActionType(enum.IntEnum):
+ TemporaryAction = 0
+ DataModificationAction = 1
+ AnalysisAction = 2
+ DataModificationAndAnalysisAction = 3
+
+
+[docs]class AnalysisSkipReason(enum.IntEnum):
+ NoSkipReason = 0
+ AlwaysSkipReason = 1
+ ExceedFunctionSizeSkipReason = 2
+ ExceedFunctionAnalysisTimeSkipReason = 3
+ ExceedFunctionUpdateCountSkipReason = 4
+ NewAutoFunctionAnalysisSuppressedReason = 5
+
+
+[docs]class AnalysisState(enum.IntEnum):
+ IdleState = 0
+ DisassembleState = 1
+ AnalyzeState = 2
+ ExtendedAnalyzeState = 3
+
+
+[docs]class BranchType(enum.IntEnum):
+ UnconditionalBranch = 0
+ FalseBranch = 1
+ TrueBranch = 2
+ CallDestination = 3
+ FunctionReturn = 4
+ SystemCall = 5
+ IndirectBranch = 6
+ UnresolvedBranch = 127
+
+
+[docs]class CallingConventionName(enum.IntEnum):
+ NoCallingConvention = 0
+ CdeclCallingConvention = 1
+ PascalCallingConvention = 2
+ ThisCallCallingConvention = 3
+ STDCallCallingConvention = 4
+ FastcallCallingConvention = 5
+ CLRCallCallingConvention = 6
+ EabiCallCallingConvention = 7
+ VectorCallCallingConvention = 8
+
+
+[docs]class DisassemblyOption(enum.IntEnum):
+ ShowAddress = 0
+ ShowOpcode = 1
+ ExpandLongOpcode = 2
+ ShowVariablesAtTopOfGraph = 3
+ ShowVariableTypesWhenAssigned = 4
+ ShowDefaultRegisterTypes = 5
+ ShowCallParameterNames = 6
+ ShowRegisterHighlight = 7
+ GroupLinearDisassemblyFunctions = 64
+ ShowFlagUsage = 128
+
+
+
+
+
+
+
+
+[docs]class FlagRole(enum.IntEnum):
+ SpecialFlagRole = 0
+ ZeroFlagRole = 1
+ PositiveSignFlagRole = 2
+ NegativeSignFlagRole = 3
+ CarryFlagRole = 4
+ OverflowFlagRole = 5
+ HalfCarryFlagRole = 6
+ EvenParityFlagRole = 7
+ OddParityFlagRole = 8
+ OrderedFlagRole = 9
+ UnorderedFlagRole = 10
+
+
+[docs]class FormInputFieldType(enum.IntEnum):
+ LabelFormField = 0
+ SeparatorFormField = 1
+ TextLineFormField = 2
+ MultilineTextFormField = 3
+ IntegerFormField = 4
+ AddressFormField = 5
+ ChoiceFormField = 6
+ OpenFileNameFormField = 7
+ SaveFileNameFormField = 8
+ DirectoryNameFormField = 9
+
+
+[docs]class FunctionAnalysisSkipOverride(enum.IntEnum):
+ DefaultFunctionAnalysisSkip = 0
+ NeverSkipFunctionAnalysis = 1
+ AlwaysSkipFunctionAnalysis = 2
+
+
+[docs]class FunctionGraphType(enum.IntEnum):
+ NormalFunctionGraph = 0
+ LowLevelILFunctionGraph = 1
+ LiftedILFunctionGraph = 2
+ LowLevelILSSAFormFunctionGraph = 3
+ MediumLevelILFunctionGraph = 4
+ MediumLevelILSSAFormFunctionGraph = 5
+ MappedMediumLevelILFunctionGraph = 6
+ MappedMediumLevelILSSAFormFunctionGraph = 7
+
+
+[docs]class HighlightColorStyle(enum.IntEnum):
+ StandardHighlightColor = 0
+ MixedHighlightColor = 1
+ CustomHighlightColor = 2
+
+
+[docs]class HighlightStandardColor(enum.IntEnum):
+ NoHighlightColor = 0
+ BlueHighlightColor = 1
+ GreenHighlightColor = 2
+ CyanHighlightColor = 3
+ RedHighlightColor = 4
+ MagentaHighlightColor = 5
+ YellowHighlightColor = 6
+ OrangeHighlightColor = 7
+ WhiteHighlightColor = 8
+ BlackHighlightColor = 9
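+
+# Usage sketch (not part of the original source): highlight colors are
+# typically applied through Function methods such as
+# `set_user_instr_highlight`, which is assumed to be available here.
+#
+#     func.set_user_instr_highlight(addr, HighlightStandardColor.OrangeHighlightColor)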
+
+
+[docs]class ILBranchDependence(enum.IntEnum):
+ NotBranchDependent = 0
+ TrueBranchDependent = 1
+ FalseBranchDependent = 2
+
+
+[docs]class ImplicitRegisterExtend(enum.IntEnum):
+ NoExtend = 0
+ ZeroExtendToFullWidth = 1
+ SignExtendToFullWidth = 2
+
+
+[docs]class InstructionTextTokenContext(enum.IntEnum):
+ NoTokenContext = 0
+ LocalVariableTokenContext = 1
+ DataVariableTokenContext = 2
+ FunctionReturnTokenContext = 3
+
+
+[docs]class InstructionTextTokenType(enum.IntEnum):
+ TextToken = 0
+ InstructionToken = 1
+ OperandSeparatorToken = 2
+ RegisterToken = 3
+ IntegerToken = 4
+ PossibleAddressToken = 5
+ BeginMemoryOperandToken = 6
+ EndMemoryOperandToken = 7
+ FloatingPointToken = 8
+ AnnotationToken = 9
+ CodeRelativeAddressToken = 10
+ ArgumentNameToken = 11
+ HexDumpByteValueToken = 12
+ HexDumpSkippedByteToken = 13
+ HexDumpInvalidByteToken = 14
+ HexDumpTextToken = 15
+ OpcodeToken = 16
+ StringToken = 17
+ CharacterConstantToken = 18
+ KeywordToken = 19
+ TypeNameToken = 20
+ FieldNameToken = 21
+ NameSpaceToken = 22
+ NameSpaceSeparatorToken = 23
+ CodeSymbolToken = 64
+ DataSymbolToken = 65
+ LocalVariableToken = 66
+ ImportToken = 67
+ AddressDisplayToken = 68
+ IndirectImportToken = 69
+ ExternalSymbolToken = 70
+
+
+[docs]class IntegerDisplayType(enum.IntEnum):
+ DefaultIntegerDisplayType = 0
+ BinaryDisplayType = 1
+ SignedOctalDisplayType = 2
+ UnsignedOctalDisplayType = 3
+ SignedDecimalDisplayType = 4
+ UnsignedDecimalDisplayType = 5
+ SignedHexadecimalDisplayType = 6
+ UnsignedHexadecimalDisplayType = 7
+ CharacterConstantDisplayType = 8
+ PointerDisplayType = 9
+
+
+[docs]class LinearDisassemblyLineType(enum.IntEnum):
+ BlankLineType = 0
+ CodeDisassemblyLineType = 1
+ DataVariableLineType = 2
+ HexDumpLineType = 3
+ FunctionHeaderLineType = 4
+ FunctionHeaderStartLineType = 5
+ FunctionHeaderEndLineType = 6
+ FunctionContinuationLineType = 7
+ LocalVariableLineType = 8
+ LocalVariableListEndLineType = 9
+ FunctionEndLineType = 10
+ NoteStartLineType = 11
+ NoteLineType = 12
+ NoteEndLineType = 13
+ SectionStartLineType = 14
+ SectionEndLineType = 15
+ SectionSeparatorLineType = 16
+ NonContiguousSeparatorLineType = 17
+
+
+[docs]class LogLevel(enum.IntEnum):
+ DebugLog = 0
+ InfoLog = 1
+ WarningLog = 2
+ ErrorLog = 3
+ AlertLog = 4
+
+
+[docs]class LowLevelILFlagCondition(enum.IntEnum):
+ LLFC_E = 0
+ LLFC_NE = 1
+ LLFC_SLT = 2
+ LLFC_ULT = 3
+ LLFC_SLE = 4
+ LLFC_ULE = 5
+ LLFC_SGE = 6
+ LLFC_UGE = 7
+ LLFC_SGT = 8
+ LLFC_UGT = 9
+ LLFC_NEG = 10
+ LLFC_POS = 11
+ LLFC_O = 12
+ LLFC_NO = 13
+ LLFC_FE = 14
+ LLFC_FNE = 15
+ LLFC_FLT = 16
+ LLFC_FLE = 17
+ LLFC_FGE = 18
+ LLFC_FGT = 19
+ LLFC_FO = 20
+ LLFC_FUO = 21
+
+
+[docs]class LowLevelILOperation(enum.IntEnum):
+ LLIL_NOP = 0
+ LLIL_SET_REG = 1
+ LLIL_SET_REG_SPLIT = 2
+ LLIL_SET_FLAG = 3
+ LLIL_SET_REG_STACK_REL = 4
+ LLIL_REG_STACK_PUSH = 5
+ LLIL_LOAD = 6
+ LLIL_STORE = 7
+ LLIL_PUSH = 8
+ LLIL_POP = 9
+ LLIL_REG = 10
+ LLIL_REG_SPLIT = 11
+ LLIL_REG_STACK_REL = 12
+ LLIL_REG_STACK_POP = 13
+ LLIL_REG_STACK_FREE_REG = 14
+ LLIL_REG_STACK_FREE_REL = 15
+ LLIL_CONST = 16
+ LLIL_CONST_PTR = 17
+ LLIL_EXTERN_PTR = 18
+ LLIL_FLOAT_CONST = 19
+ LLIL_FLAG = 20
+ LLIL_FLAG_BIT = 21
+ LLIL_ADD = 22
+ LLIL_ADC = 23
+ LLIL_SUB = 24
+ LLIL_SBB = 25
+ LLIL_AND = 26
+ LLIL_OR = 27
+ LLIL_XOR = 28
+ LLIL_LSL = 29
+ LLIL_LSR = 30
+ LLIL_ASR = 31
+ LLIL_ROL = 32
+ LLIL_RLC = 33
+ LLIL_ROR = 34
+ LLIL_RRC = 35
+ LLIL_MUL = 36
+ LLIL_MULU_DP = 37
+ LLIL_MULS_DP = 38
+ LLIL_DIVU = 39
+ LLIL_DIVU_DP = 40
+ LLIL_DIVS = 41
+ LLIL_DIVS_DP = 42
+ LLIL_MODU = 43
+ LLIL_MODU_DP = 44
+ LLIL_MODS = 45
+ LLIL_MODS_DP = 46
+ LLIL_NEG = 47
+ LLIL_NOT = 48
+ LLIL_SX = 49
+ LLIL_ZX = 50
+ LLIL_LOW_PART = 51
+ LLIL_JUMP = 52
+ LLIL_JUMP_TO = 53
+ LLIL_CALL = 54
+ LLIL_CALL_STACK_ADJUST = 55
+ LLIL_TAILCALL = 56
+ LLIL_RET = 57
+ LLIL_NORET = 58
+ LLIL_IF = 59
+ LLIL_GOTO = 60
+ LLIL_FLAG_COND = 61
+ LLIL_FLAG_GROUP = 62
+ LLIL_CMP_E = 63
+ LLIL_CMP_NE = 64
+ LLIL_CMP_SLT = 65
+ LLIL_CMP_ULT = 66
+ LLIL_CMP_SLE = 67
+ LLIL_CMP_ULE = 68
+ LLIL_CMP_SGE = 69
+ LLIL_CMP_UGE = 70
+ LLIL_CMP_SGT = 71
+ LLIL_CMP_UGT = 72
+ LLIL_TEST_BIT = 73
+ LLIL_BOOL_TO_INT = 74
+ LLIL_ADD_OVERFLOW = 75
+ LLIL_SYSCALL = 76
+ LLIL_BP = 77
+ LLIL_TRAP = 78
+ LLIL_INTRINSIC = 79
+ LLIL_UNDEF = 80
+ LLIL_UNIMPL = 81
+ LLIL_UNIMPL_MEM = 82
+ LLIL_FADD = 83
+ LLIL_FSUB = 84
+ LLIL_FMUL = 85
+ LLIL_FDIV = 86
+ LLIL_FSQRT = 87
+ LLIL_FNEG = 88
+ LLIL_FABS = 89
+ LLIL_FLOAT_TO_INT = 90
+ LLIL_INT_TO_FLOAT = 91
+ LLIL_FLOAT_CONV = 92
+ LLIL_ROUND_TO_INT = 93
+ LLIL_FLOOR = 94
+ LLIL_CEIL = 95
+ LLIL_FTRUNC = 96
+ LLIL_FCMP_E = 97
+ LLIL_FCMP_NE = 98
+ LLIL_FCMP_LT = 99
+ LLIL_FCMP_LE = 100
+ LLIL_FCMP_GE = 101
+ LLIL_FCMP_GT = 102
+ LLIL_FCMP_O = 103
+ LLIL_FCMP_UO = 104
+ LLIL_SET_REG_SSA = 105
+ LLIL_SET_REG_SSA_PARTIAL = 106
+ LLIL_SET_REG_SPLIT_SSA = 107
+ LLIL_SET_REG_STACK_REL_SSA = 108
+ LLIL_SET_REG_STACK_ABS_SSA = 109
+ LLIL_REG_SPLIT_DEST_SSA = 110
+ LLIL_REG_STACK_DEST_SSA = 111
+ LLIL_REG_SSA = 112
+ LLIL_REG_SSA_PARTIAL = 113
+ LLIL_REG_SPLIT_SSA = 114
+ LLIL_REG_STACK_REL_SSA = 115
+ LLIL_REG_STACK_ABS_SSA = 116
+ LLIL_REG_STACK_FREE_REL_SSA = 117
+ LLIL_REG_STACK_FREE_ABS_SSA = 118
+ LLIL_SET_FLAG_SSA = 119
+ LLIL_FLAG_SSA = 120
+ LLIL_FLAG_BIT_SSA = 121
+ LLIL_CALL_SSA = 122
+ LLIL_SYSCALL_SSA = 123
+ LLIL_TAILCALL_SSA = 124
+ LLIL_CALL_PARAM = 125
+ LLIL_CALL_STACK_SSA = 126
+ LLIL_CALL_OUTPUT_SSA = 127
+ LLIL_LOAD_SSA = 128
+ LLIL_STORE_SSA = 129
+ LLIL_INTRINSIC_SSA = 130
+ LLIL_REG_PHI = 131
+ LLIL_REG_STACK_PHI = 132
+ LLIL_FLAG_PHI = 133
+ LLIL_MEM_PHI = 134
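+
+# Usage sketch (not part of the original source): these operation values are
+# what `LowLevelILInstruction.operation` compares against when walking IL,
+# e.g. to find call sites in a function `func`:
+#
+#     for block in func.low_level_il:
+#         for il in block:
+#             if il.operation == LowLevelILOperation.LLIL_CALL:
+#                 print(hex(il.address), il)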
+
+
+[docs]class MediumLevelILOperation(enum.IntEnum):
+ MLIL_NOP = 0
+ MLIL_SET_VAR = 1
+ MLIL_SET_VAR_FIELD = 2
+ MLIL_SET_VAR_SPLIT = 3
+ MLIL_LOAD = 4
+ MLIL_LOAD_STRUCT = 5
+ MLIL_STORE = 6
+ MLIL_STORE_STRUCT = 7
+ MLIL_VAR = 8
+ MLIL_VAR_FIELD = 9
+ MLIL_VAR_SPLIT = 10
+ MLIL_ADDRESS_OF = 11
+ MLIL_ADDRESS_OF_FIELD = 12
+ MLIL_CONST = 13
+ MLIL_CONST_PTR = 14
+ MLIL_EXTERN_PTR = 15
+ MLIL_FLOAT_CONST = 16
+ MLIL_IMPORT = 17
+ MLIL_ADD = 18
+ MLIL_ADC = 19
+ MLIL_SUB = 20
+ MLIL_SBB = 21
+ MLIL_AND = 22
+ MLIL_OR = 23
+ MLIL_XOR = 24
+ MLIL_LSL = 25
+ MLIL_LSR = 26
+ MLIL_ASR = 27
+ MLIL_ROL = 28
+ MLIL_RLC = 29
+ MLIL_ROR = 30
+ MLIL_RRC = 31
+ MLIL_MUL = 32
+ MLIL_MULU_DP = 33
+ MLIL_MULS_DP = 34
+ MLIL_DIVU = 35
+ MLIL_DIVU_DP = 36
+ MLIL_DIVS = 37
+ MLIL_DIVS_DP = 38
+ MLIL_MODU = 39
+ MLIL_MODU_DP = 40
+ MLIL_MODS = 41
+ MLIL_MODS_DP = 42
+ MLIL_NEG = 43
+ MLIL_NOT = 44
+ MLIL_SX = 45
+ MLIL_ZX = 46
+ MLIL_LOW_PART = 47
+ MLIL_JUMP = 48
+ MLIL_JUMP_TO = 49
+ MLIL_RET_HINT = 50
+ MLIL_CALL = 51
+ MLIL_CALL_UNTYPED = 52
+ MLIL_CALL_OUTPUT = 53
+ MLIL_CALL_PARAM = 54
+ MLIL_RET = 55
+ MLIL_NORET = 56
+ MLIL_IF = 57
+ MLIL_GOTO = 58
+ MLIL_CMP_E = 59
+ MLIL_CMP_NE = 60
+ MLIL_CMP_SLT = 61
+ MLIL_CMP_ULT = 62
+ MLIL_CMP_SLE = 63
+ MLIL_CMP_ULE = 64
+ MLIL_CMP_SGE = 65
+ MLIL_CMP_UGE = 66
+ MLIL_CMP_SGT = 67
+ MLIL_CMP_UGT = 68
+ MLIL_TEST_BIT = 69
+ MLIL_BOOL_TO_INT = 70
+ MLIL_ADD_OVERFLOW = 71
+ MLIL_SYSCALL = 72
+ MLIL_SYSCALL_UNTYPED = 73
+ MLIL_TAILCALL = 74
+ MLIL_TAILCALL_UNTYPED = 75
+ MLIL_INTRINSIC = 76
+ MLIL_FREE_VAR_SLOT = 77
+ MLIL_BP = 78
+ MLIL_TRAP = 79
+ MLIL_UNDEF = 80
+ MLIL_UNIMPL = 81
+ MLIL_UNIMPL_MEM = 82
+ MLIL_FADD = 83
+ MLIL_FSUB = 84
+ MLIL_FMUL = 85
+ MLIL_FDIV = 86
+ MLIL_FSQRT = 87
+ MLIL_FNEG = 88
+ MLIL_FABS = 89
+ MLIL_FLOAT_TO_INT = 90
+ MLIL_INT_TO_FLOAT = 91
+ MLIL_FLOAT_CONV = 92
+ MLIL_ROUND_TO_INT = 93
+ MLIL_FLOOR = 94
+ MLIL_CEIL = 95
+ MLIL_FTRUNC = 96
+ MLIL_FCMP_E = 97
+ MLIL_FCMP_NE = 98
+ MLIL_FCMP_LT = 99
+ MLIL_FCMP_LE = 100
+ MLIL_FCMP_GE = 101
+ MLIL_FCMP_GT = 102
+ MLIL_FCMP_O = 103
+ MLIL_FCMP_UO = 104
+ MLIL_SET_VAR_SSA = 105
+ MLIL_SET_VAR_SSA_FIELD = 106
+ MLIL_SET_VAR_SPLIT_SSA = 107
+ MLIL_SET_VAR_ALIASED = 108
+ MLIL_SET_VAR_ALIASED_FIELD = 109
+ MLIL_VAR_SSA = 110
+ MLIL_VAR_SSA_FIELD = 111
+ MLIL_VAR_ALIASED = 112
+ MLIL_VAR_ALIASED_FIELD = 113
+ MLIL_VAR_SPLIT_SSA = 114
+ MLIL_CALL_SSA = 115
+ MLIL_CALL_UNTYPED_SSA = 116
+ MLIL_SYSCALL_SSA = 117
+ MLIL_SYSCALL_UNTYPED_SSA = 118
+ MLIL_TAILCALL_SSA = 119
+ MLIL_TAILCALL_UNTYPED_SSA = 120
+ MLIL_CALL_PARAM_SSA = 121
+ MLIL_CALL_OUTPUT_SSA = 122
+ MLIL_LOAD_SSA = 123
+ MLIL_LOAD_STRUCT_SSA = 124
+ MLIL_STORE_SSA = 125
+ MLIL_STORE_STRUCT_SSA = 126
+ MLIL_INTRINSIC_SSA = 127
+ MLIL_FREE_VAR_SLOT_SSA = 128
+ MLIL_VAR_PHI = 129
+ MLIL_MEM_PHI = 130
+
+
+[docs]class MemberAccess(enum.IntEnum):
+ NoAccess = 0
+ PrivateAccess = 1
+ ProtectedAccess = 2
+ PublicAccess = 3
+
+
+[docs]class MemberScope(enum.IntEnum):
+ NoScope = 0
+ StaticScope = 1
+ VirtualScope = 2
+ ThunkScope = 3
+ FriendScope = 4
+
+
+[docs]class MessageBoxButtonResult(enum.IntEnum):
+ NoButton = 0
+ YesButton = 1
+ OKButton = 2
+ CancelButton = 3
+
+
+[docs]class MessageBoxButtonSet(enum.IntEnum):
+ OKButtonSet = 0
+ YesNoButtonSet = 1
+ YesNoCancelButtonSet = 2
+
+
+[docs]class MessageBoxIcon(enum.IntEnum):
+ InformationIcon = 0
+ QuestionIcon = 1
+ WarningIcon = 2
+ ErrorIcon = 3
+
+
+[docs]class MetadataType(enum.IntEnum):
+ InvalidDataType = 0
+ BooleanDataType = 1
+ StringDataType = 2
+ UnsignedIntegerDataType = 3
+ SignedIntegerDataType = 4
+ DoubleDataType = 5
+ RawDataType = 6
+ KeyValueDataType = 7
+ ArrayDataType = 8
+
+
+
+
+
+[docs]class NameType(enum.IntEnum):
+ NoNameType = 0
+ ConstructorNameType = 1
+ DestructorNameType = 2
+ OperatorNewNameType = 3
+ OperatorDeleteNameType = 4
+ OperatorAssignNameType = 5
+ OperatorRightShiftNameType = 6
+ OperatorLeftShiftNameType = 7
+ OperatorNotNameType = 8
+ OperatorEqualNameType = 9
+ OperatorNotEqualNameType = 10
+ OperatorArrayNameType = 11
+ OperatorArrowNameType = 12
+ OperatorStarNameType = 13
+ OperatorIncrementNameType = 14
+ OperatorDecrementNameType = 15
+ OperatorMinusNameType = 16
+ OperatorPlusNameType = 17
+ OperatorBitAndNameType = 18
+ OperatorArrowStarNameType = 19
+ OperatorDivideNameType = 20
+ OperatorModulusNameType = 21
+ OperatorLessThanNameType = 22
+ OperatorLessThanEqualNameType = 23
+ OperatorGreaterThanNameType = 24
+ OperatorGreaterThanEqualNameType = 25
+ OperatorCommaNameType = 26
+ OperatorParenthesesNameType = 27
+ OperatorTildeNameType = 28
+ OperatorXorNameType = 29
+ OperatorBitOrNameType = 30
+ OperatorLogicalAndNameType = 31
+ OperatorLogicalOrNameType = 32
+ OperatorStarEqualNameType = 33
+ OperatorPlusEqualNameType = 34
+ OperatorMinusEqualNameType = 35
+ OperatorDivideEqualNameType = 36
+ OperatorModulusEqualNameType = 37
+ OperatorRightShiftEqualNameType = 38
+ OperatorLeftShiftEqualNameType = 39
+ OperatorAndEqualNameType = 40
+ OperatorOrEqualNameType = 41
+ OperatorXorEqualNameType = 42
+ VFTableNameType = 43
+ VBTableNameType = 44
+ VCallNameType = 45
+ TypeofNameType = 46
+ LocalStaticGuardNameType = 47
+ StringNameType = 48
+ VBaseDestructorNameType = 49
+ VectorDeletingDestructorNameType = 50
+ DefaultConstructorClosureNameType = 51
+ ScalarDeletingDestructorNameType = 52
+ VectorConstructorIteratorNameType = 53
+ VectorDestructorIteratorNameType = 54
+ VectorVBaseConstructorIteratoreNameType = 55
+ VirtualDisplacementMapNameType = 56
+ EHVectorConstructorIteratorNameType = 57
+ EHVectorDestructorIteratorNameType = 58
+ EHVectorVBaseConstructorIteratorNameType = 59
+ CopyConstructorClosureNameType = 60
+ UDTReturningNameType = 61
+ LocalVFTableNameType = 62
+ LocalVFTableConstructorClosureNameType = 63
+ OperatorNewArrayNameType = 64
+ OperatorDeleteArrayNameType = 65
+ PlacementDeleteClosureNameType = 66
+ PlacementDeleteClosureArrayNameType = 67
+ OperatorReturnTypeNameType = 68
+ RttiTypeDescriptor = 69
+ RttiBaseClassDescriptor = 70
+ RttiBaseClassArray = 71
+ RttiClassHeirarchyDescriptor = 72
+ RttiCompleteObjectLocator = 73
+ OperatorUnaryMinusNameType = 74
+ OperatorUnaryPlusNameType = 75
+ OperatorUnaryBitAndNameType = 76
+ OperatorUnaryStarNameType = 77
+
+
+[docs]class NamedTypeReferenceClass(enum.IntEnum):
+ UnknownNamedTypeClass = 0
+ TypedefNamedTypeClass = 1
+ ClassNamedTypeClass = 2
+ StructNamedTypeClass = 3
+ UnionNamedTypeClass = 4
+ EnumNamedTypeClass = 5
+
+
+[docs]class PluginCommandType(enum.IntEnum):
+ DefaultPluginCommand = 0
+ AddressPluginCommand = 1
+ RangePluginCommand = 2
+ FunctionPluginCommand = 3
+ LowLevelILFunctionPluginCommand = 4
+ LowLevelILInstructionPluginCommand = 5
+ MediumLevelILFunctionPluginCommand = 6
+ MediumLevelILInstructionPluginCommand = 7
+
+
+[docs]class PluginLoadOrder(enum.IntEnum):
+ EarlyPluginLoadOrder = 0
+ NormalPluginLoadOrder = 1
+ LatePluginLoadOrder = 2
+
+
+[docs]class PluginOrigin(enum.IntEnum):
+ OfficialPluginOrigin = 0
+ CommunityPluginOrigin = 1
+ OtherPluginOrigin = 2
+
+
+[docs]class PluginType(enum.IntEnum):
+ CorePluginType = 0
+ UiPluginType = 1
+ ArchitecturePluginType = 2
+ BinaryViewPluginType = 3
+
+
+[docs]class PluginUpdateStatus(enum.IntEnum):
+ UpToDatePluginStatus = 0
+ UpdatesAvailablePluginStatus = 1
+
+
+[docs]class PointerSuffix(enum.IntEnum):
+ Ptr64Suffix = 0
+ UnalignedSuffix = 1
+ RestrictSuffix = 2
+ ReferenceSuffix = 3
+ LvalueSuffix = 4
+
+
+[docs]class ReferenceType(enum.IntEnum):
+ PointerReferenceType = 0
+ ReferenceReferenceType = 1
+ RValueReferenceType = 2
+ NoReference = 3
+
+
+[docs]class RegisterValueType(enum.IntEnum):
+ UndeterminedValue = 0
+ EntryValue = 1
+ ConstantValue = 2
+ ConstantPointerValue = 3
+ ExternalPointerValue = 4
+ StackFrameOffset = 5
+ ReturnAddressValue = 6
+ ImportedAddressValue = 7
+ SignedRangeValue = 8
+ UnsignedRangeValue = 9
+ LookupTableValue = 10
+ InSetOfValues = 11
+ NotInSetOfValues = 12
+
+
+[docs]class RelocationType(enum.IntEnum):
+ ELFGlobalRelocationType = 0
+ ELFCopyRelocationType = 1
+ ELFJumpSlotRelocationType = 2
+ StandardRelocationType = 3
+ IgnoredRelocation = 4
+
+
+[docs]class ReportType(enum.IntEnum):
+ PlainTextReportType = 0
+ MarkdownReportType = 1
+ HTMLReportType = 2
+ FlowGraphReportType = 3
+
+
+[docs]class ScriptingProviderExecuteResult(enum.IntEnum):
+ InvalidScriptInput = 0
+ IncompleteScriptInput = 1
+ SuccessfulScriptExecution = 2
+ ScriptExecutionCancelled = 3
+
+
+[docs]class ScriptingProviderInputReadyState(enum.IntEnum):
+ NotReadyForInput = 0
+ ReadyForScriptExecution = 1
+ ReadyForScriptProgramInput = 2
+
+
+[docs]class SectionSemantics(enum.IntEnum):
+ DefaultSectionSemantics = 0
+ ReadOnlyCodeSectionSemantics = 1
+ ReadOnlyDataSectionSemantics = 2
+ ReadWriteDataSectionSemantics = 3
+ ExternalSectionSemantics = 4
+
+
+[docs]class SegmentFlag(enum.IntEnum):
+ SegmentExecutable = 1
+ SegmentWritable = 2
+ SegmentReadable = 4
+ SegmentContainsData = 8
+ SegmentContainsCode = 16
+ SegmentDenyWrite = 32
+ SegmentDenyExecute = 64
+
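+# Hedged usage sketch (comments only, not part of the original source): SegmentFlag
+# values are powers of two and are intended to be OR'd together into a bitmask, e.g.
+#
+#   flags = SegmentFlag.SegmentReadable | SegmentFlag.SegmentExecutable | SegmentFlag.SegmentContainsCode
+#   bv.add_user_segment(0x400000, 0x1000, 0, 0x1000, flags)  # add_user_segment is assumed here for illustration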
+
+[docs]class SettingsScope(enum.IntEnum):
+ SettingsInvalidScope = 0
+ SettingsAutoScope = 1
+ SettingsDefaultScope = 2
+ SettingsUserScope = 3
+ SettingsWorkspaceScope = 4
+ SettingsContextScope = 5
+
+
+[docs]class StringType(enum.IntEnum):
+ AsciiString = 0
+ Utf16String = 1
+ Utf32String = 2
+ Utf8String = 3
+
+
+[docs]class StructureType(enum.IntEnum):
+ ClassStructureType = 0
+ StructStructureType = 1
+ UnionStructureType = 2
+
+
+[docs]class SymbolBinding(enum.IntEnum):
+ NoBinding = 0
+ LocalBinding = 1
+ GlobalBinding = 2
+ WeakBinding = 3
+
+
+[docs]class SymbolType(enum.IntEnum):
+ FunctionSymbol = 0
+ ImportAddressSymbol = 1
+ ImportedFunctionSymbol = 2
+ DataSymbol = 3
+ ImportedDataSymbol = 4
+ ExternalSymbol = 5
+
+
+[docs]class TransformType(enum.IntEnum):
+ BinaryCodecTransform = 0
+ TextCodecTransform = 1
+ UnicodeCodecTransform = 2
+ DecodeTransform = 3
+ BinaryEncodeTransform = 4
+ TextEncodeTransform = 5
+ EncryptTransform = 6
+ InvertingTransform = 7
+ HashTransform = 8
+
+
+[docs]class TypeClass(enum.IntEnum):
+ VoidTypeClass = 0
+ BoolTypeClass = 1
+ IntegerTypeClass = 2
+ FloatTypeClass = 3
+ StructureTypeClass = 4
+ EnumerationTypeClass = 5
+ PointerTypeClass = 6
+ ArrayTypeClass = 7
+ FunctionTypeClass = 8
+ VarArgsTypeClass = 9
+ ValueTypeClass = 10
+ NamedTypeReferenceClass = 11
+ WideCharTypeClass = 12
+
+
+[docs]class UpdateResult(enum.IntEnum):
+ UpdateFailed = 0
+ UpdateSuccess = 1
+ AlreadyUpToDate = 2
+ UpdateAvailable = 3
+
+
+[docs]class VariableSourceType(enum.IntEnum):
+ StackVariableSourceType = 0
+ RegisterVariableSourceType = 1
+ FlagVariableSourceType = 2
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja import log
+
+[docs]class FileAccessor(object):
+[docs] def __init__(self):
+ self._cb = core.BNFileAccessor()
+ self._cb.context = 0
+ self._cb.getLength = self._cb.getLength.__class__(self._get_length)
+ self._cb.read = self._cb.read.__class__(self._read)
+ self._cb.write = self._cb.write.__class__(self._write)
+
+ def __len__(self):
+ return self.get_length()
+
+ def _get_length(self, ctxt):
+ try:
+ return self.get_length()
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _read(self, ctxt, dest, offset, length):
+ try:
+ data = self.read(offset, length)
+ if data is None:
+ return 0
+ if len(data) > length:
+ data = data[0:length]
+ ctypes.memmove(dest, data, len(data))
+ return len(data)
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
+ def _write(self, ctxt, offset, src, length):
+ try:
+ data = ctypes.create_string_buffer(length)
+ ctypes.memmove(data, src, length)
+ return self.write(offset, data.raw)
+ except:
+ log.log_error(traceback.format_exc())
+ return 0
+
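+# Hedged example (not part of the original source): a minimal FileAccessor subclass
+# backed by an in-memory bytearray, illustrating the get_length/read/write contract
+# that the ctypes callbacks above forward to. The class name is illustrative only.
+class ExampleBufferAccessor(FileAccessor):
+    def __init__(self, data = b""):
+        super(ExampleBufferAccessor, self).__init__()
+        self._data = bytearray(data)
+
+    def get_length(self):
+        return len(self._data)
+
+    def read(self, offset, length):
+        return bytes(self._data[offset:offset + length])
+
+    def write(self, offset, data):
+        end = offset + len(data)
+        if end > len(self._data):
+            self._data.extend(b"\x00" * (end - len(self._data)))
+        self._data[offset:end] = data
+        return len(data)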
+
+[docs]class CoreFileAccessor(FileAccessor):
+[docs] def __init__(self, accessor):
+ self._cb = core.BNFileAccessor()
+ self._cb.context = accessor.context
+ self._cb.getLength = accessor.getLength
+ self._cb.read = accessor.read
+ self._cb.write = accessor.write
+
+
+
+[docs] def read(self, offset, length):
+ data = ctypes.create_string_buffer(length)
+ length = self._cb.read(self._cb.context, data, offset, length)
+ return data.raw[0:length]
+
+[docs] def write(self, offset, value):
+ value = str(value)
+ data = ctypes.create_string_buffer(value)
+ return self._cb.write(self._cb.context, offset, data, len(value))
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from __future__ import absolute_import
+import traceback
+import ctypes
+
+# Binary Ninja Components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja import associateddatastore #required for _FileMetadataAssociatedDataStore
+from binaryninja import log
+
+
+
+
+class _FileMetadataAssociatedDataStore(associateddatastore._AssociatedDataStore):
+ _defaults = {}
+
+
+[docs]class FileMetadata(object):
+ """
+ ``class FileMetadata`` represents the file being analyzed by Binary Ninja. It is responsible for opening and
+ closing files, creating the database (.bndb) files, and keeping track of undoable actions.
+ """
+
+ _associated_data = {}
+
+[docs] def __init__(self, filename = None, handle = None):
+ """
+ Instantiates a new FileMetadata class.
+
+ :param filename: The string path to the file to be opened. Defaults to None.
+ :param handle: A handle to the underlying C FileMetadata object. Defaults to None.
+ """
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNFileMetadata)
+ else:
+ binaryninja._init_plugins()
+ self.handle = core.BNCreateFileMetadata()
+ if filename is not None:
+ core.BNSetFilename(self.handle, str(filename))
+ self.nav = None
+
+ def __del__(self):
+ if self.navigation is not None:
+ core.BNSetFileMetadataNavigationHandler(self.handle, None)
+ core.BNFreeFileMetadata(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, FileMetadata):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, FileMetadata):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @classmethod
+ def _unregister(cls, f):
+ handle = ctypes.cast(f, ctypes.c_void_p)
+ if handle.value in cls._associated_data:
+ del cls._associated_data[handle.value]
+
+[docs] @classmethod
+ def set_default_session_data(cls, name, value):
+ _FileMetadataAssociatedDataStore.set_default(name, value)
+
+ @property
+ def original_filename(self):
+ """The original name of the binary opened if a database (.bndb) is open, otherwise the current filename (read/write)"""
+ return core.BNGetOriginalFilename(self.handle)
+
+ @original_filename.setter
+ def original_filename(self, value):
+ core.BNSetOriginalFilename(self.handle, str(value))
+
+ @property
+ def filename(self):
+ """The name of the open bndb or binary filename (read/write)"""
+ return core.BNGetFilename(self.handle)
+
+ @filename.setter
+ def filename(self, value):
+ core.BNSetFilename(self.handle, str(value))
+
+ @property
+ def modified(self):
+ """Boolean result of whether the file is modified (Inverse of 'saved' property) (read/write)"""
+ return core.BNIsFileModified(self.handle)
+
+ @modified.setter
+ def modified(self, value):
+ if value:
+ core.BNMarkFileModified(self.handle)
+ else:
+ core.BNMarkFileSaved(self.handle)
+
+ @property
+ def analysis_changed(self):
+ """Boolean result of whether the auto-analysis results have changed (read-only)"""
+ return core.BNIsAnalysisChanged(self.handle)
+
+ @property
+ def has_database(self):
+ """Whether the FileMetadata is backed by a database (read-only)"""
+ return core.BNIsBackedByDatabase(self.handle)
+
+ @property
+ def view(self):
+ return core.BNGetCurrentView(self.handle)
+
+ @view.setter
+ def view(self, value):
+ core.BNNavigate(self.handle, str(value), core.BNGetCurrentOffset(self.handle))
+
+ @property
+ def offset(self):
+ """The current offset into the file (read/write)"""
+ return core.BNGetCurrentOffset(self.handle)
+
+ @offset.setter
+ def offset(self, value):
+ core.BNNavigate(self.handle, core.BNGetCurrentView(self.handle), value)
+
+ @property
+ def raw(self):
+ """Gets the "Raw" BinaryView of the file"""
+ view = core.BNGetFileViewOfType(self.handle, "Raw")
+ if view is None:
+ return None
+ return binaryninja.binaryview.BinaryView(file_metadata = self, handle = view)
+
+ @property
+ def saved(self):
+ """Boolean result of whether the file has been saved (Inverse of 'modified' property) (read/write)"""
+ return not core.BNIsFileModified(self.handle)
+
+ @saved.setter
+ def saved(self, value):
+ if value:
+ core.BNMarkFileSaved(self.handle)
+ else:
+ core.BNMarkFileModified(self.handle)
+
+ @property
+ def navigation(self):
+ return self.nav
+
+ @navigation.setter
+ def navigation(self, value):
+ value._register(self.handle)
+ self.nav = value
+
+ @property
+ def session_data(self):
+ """Dictionary object where plugins can store arbitrary data associated with the file"""
+ handle = ctypes.cast(self.handle, ctypes.c_void_p)
+ if handle.value not in FileMetadata._associated_data:
+ obj = _FileMetadataAssociatedDataStore()
+ FileMetadata._associated_data[handle.value] = obj
+ return obj
+ else:
+ return FileMetadata._associated_data[handle.value]
+
+[docs] def close(self):
+ """
+ Closes the underlying file handle. It is recommended that this be done in a
+ `finally` clause to avoid handle leaks.
+ """
+ core.BNCloseFile(self.handle)
+
+[docs] def begin_undo_actions(self):
+ """
+ ``begin_undo_actions`` starts recording actions taken so they can be undone at some point.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>>
+ """
+ core.BNBeginUndoActions(self.handle)
+
+[docs] def commit_undo_actions(self):
+ """
+ ``commit_undo_actions`` commits the actions taken since the last commit to the undo database.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>>
+ """
+ core.BNCommitUndoActions(self.handle)
+
+[docs] def undo(self):
+ """
+ ``undo`` undoes the last committed action in the undo database.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.redo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>>
+ """
+ core.BNUndo(self.handle)
+
+[docs] def redo(self):
+ """
+ ``redo`` redoes the last committed action in the undo database.
+
+ :rtype: None
+ :Example:
+
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.begin_undo_actions()
+ >>> bv.convert_to_nop(0x100012f1)
+ True
+ >>> bv.commit_undo_actions()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>> bv.undo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'xor eax, eax'
+ >>> bv.redo()
+ >>> bv.get_disassembly(0x100012f1)
+ 'nop'
+ >>>
+ """
+ core.BNRedo(self.handle)
+
+
+
+[docs] def create_database(self, filename, progress_func = None):
+ if progress_func is None:
+ return core.BNCreateDatabase(self.raw.handle, str(filename))
+ else:
+ return core.BNCreateDatabaseWithProgress(self.raw.handle, str(filename), None,
+ ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_ulonglong, ctypes.c_ulonglong)(
+ lambda ctxt, cur, total: progress_func(cur, total)))
+
+[docs] def open_existing_database(self, filename, progress_func = None):
+ if progress_func is None:
+ view = core.BNOpenExistingDatabase(self.handle, str(filename))
+ else:
+ view = core.BNOpenExistingDatabaseWithProgress(self.handle, str(filename), None,
+ ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_ulonglong, ctypes.c_ulonglong)(
+ lambda ctxt, cur, total: progress_func(cur, total)))
+ if view is None:
+ return None
+ return binaryninja.binaryview.BinaryView(file_metadata = self, handle = view)
+
+[docs] def save_auto_snapshot(self, progress_func = None):
+ if progress_func is None:
+ return core.BNSaveAutoSnapshot(self.raw.handle)
+ else:
+ return core.BNSaveAutoSnapshotWithProgress(self.raw.handle, None,
+ ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_ulonglong, ctypes.c_ulonglong)(
+ lambda ctxt, cur, total: progress_func(cur, total)))
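+ # Hedged usage sketch (comments only, not part of the original source): the optional
+ # progress_func accepted by create_database, open_existing_database and
+ # save_auto_snapshot above is called as progress_func(cur, total), e.g.
+ #
+ #   def report(cur, total):
+ #       print("%d / %d" % (cur, total))
+ #
+ #   bv.file.create_database("example.bndb", report)        # save to a database
+ #   bv.file.open_existing_database("example.bndb", report) # reopen it later
+ #
+ # (bv.file is assumed here to be the FileMetadata instance backing an open BinaryView.)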
+
+[docs] def get_view_of_type(self, name):
+ view = core.BNGetFileViewOfType(self.handle, str(name))
+ if view is None:
+ view_type = core.BNGetBinaryViewTypeByName(str(name))
+ if view_type is None:
+ return None
+ view = core.BNCreateBinaryViewOfType(view_type, self.raw.handle)
+ if view is None:
+ return None
+ return binaryninja.binaryview.BinaryView(file_metadata = self, handle = view)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+# Copyright (c) 2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+import threading
+import traceback
+
+# Binary Ninja components
+import binaryninja
+from binaryninja.enums import (BranchType, InstructionTextTokenType, HighlightStandardColor)
+from binaryninja import _binaryninjacore as core
+from binaryninja import function
+from binaryninja import binaryview
+from binaryninja import lowlevelil
+from binaryninja import mediumlevelil
+from binaryninja import basicblock
+from binaryninja import log
+from binaryninja import highlight
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class FlowGraphEdge(object):
+[docs] def __init__(self, branch_type, source, target, points, back_edge):
+ self.type = BranchType(branch_type)
+ self.source = source
+ self.target = target
+ self.points = points
+ self.back_edge = back_edge
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.type.name, repr(self.target))
+
+
+[docs]class FlowGraphNode(object):
+[docs] def __init__(self, graph = None, handle = None):
+ if handle is None:
+ if graph is None:
+ self.handle = None
+ raise ValueError("flow graph node must be associated with a graph")
+ handle = core.BNCreateFlowGraphNode(graph.handle)
+ self.handle = handle
+ self.graph = graph
+ if self.graph is None:
+ self.graph = FlowGraph(handle = core.BNGetFlowGraphNodeOwner(self.handle))
+
+ def __del__(self):
+ if self.handle is not None:
+ core.BNFreeFlowGraphNode(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, FlowGraphNode):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, FlowGraphNode):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def basic_block(self):
+ """Basic block associated with this part of the flow graph"""
+ block = core.BNGetFlowGraphBasicBlock(self.handle)
+ if not block:
+ return None
+ func_handle = core.BNGetBasicBlockFunction(block)
+ if not func_handle:
+ core.BNFreeBasicBlock(block)
+ return None
+
+ view = binaryview.BinaryView(handle = core.BNGetFunctionData(func_handle))
+ func = function.Function(view, func_handle)
+
+ if core.BNIsLowLevelILBasicBlock(block):
+ block = lowlevelil.LowLevelILBasicBlock(view, block,
+ lowlevelil.LowLevelILFunction(func.arch, core.BNGetBasicBlockLowLevelILFunction(block), func))
+ elif core.BNIsMediumLevelILBasicBlock(block):
+ block = mediumlevelil.MediumLevelILBasicBlock(view, block,
+ mediumlevelil.MediumLevelILFunction(func.arch, core.BNGetBasicBlockMediumLevelILFunction(block), func))
+ else:
+ block = basicblock.BasicBlock(block, view)
+ return block
+
+ @basic_block.setter
+ def basic_block(self, block):
+ if block is None:
+ core.BNSetFlowGraphBasicBlock(self.handle, None)
+ else:
+ core.BNSetFlowGraphBasicBlock(self.handle, block.handle)
+
+ @property
+ def x(self):
+ """Flow graph block X (read-only)"""
+ return core.BNGetFlowGraphNodeX(self.handle)
+
+ @property
+ def y(self):
+ """Flow graph block Y (read-only)"""
+ return core.BNGetFlowGraphNodeY(self.handle)
+
+ @property
+ def width(self):
+ """Flow graph block width (read-only)"""
+ return core.BNGetFlowGraphNodeWidth(self.handle)
+
+ @property
+ def height(self):
+ """Flow graph block height (read-only)"""
+ return core.BNGetFlowGraphNodeHeight(self.handle)
+
+ @property
+ def lines(self):
+ """Flow graph block list of text lines"""
+ count = ctypes.c_ulonglong()
+ lines = core.BNGetFlowGraphNodeLines(self.handle, count)
+ block = self.basic_block
+ result = []
+ for i in range(0, count.value):
+ addr = lines[i].addr
+ if (lines[i].instrIndex != 0xffffffffffffffff) and (block is not None) and hasattr(block, 'il_function'):
+ il_instr = block.il_function[lines[i].instrIndex]
+ else:
+ il_instr = None
+ color = highlight.HighlightColor._from_core_struct(lines[i].highlight)
+ tokens = function.InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count)
+ result.append(function.DisassemblyTextLine(tokens, addr, il_instr, color))
+ core.BNFreeDisassemblyTextLines(lines, count.value)
+ return result
+
+ @lines.setter
+ def lines(self, lines):
+ if isinstance(lines, str):
+ lines = lines.split('\n')
+ line_buf = (core.BNDisassemblyTextLine * len(lines))()
+ for i in range(0, len(lines)):
+ line = lines[i]
+ if isinstance(line, str):
+ line = function.DisassemblyTextLine([function.InstructionTextToken(InstructionTextTokenType.TextToken, line)])
+ if not isinstance(line, function.DisassemblyTextLine):
+ line = function.DisassemblyTextLine(line)
+ if line.address is None:
+ if len(line.tokens) > 0:
+ line_buf[i].addr = line.tokens[0].address
+ else:
+ line_buf[i].addr = 0
+ else:
+ line_buf[i].addr = line.address
+ if line.il_instruction is not None:
+ line_buf[i].instrIndex = line.il_instruction.instr_index
+ else:
+ line_buf[i].instrIndex = 0xffffffffffffffff
+ color = line.highlight
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ line_buf[i].highlight = color._get_core_struct()
+ line_buf[i].count = len(line.tokens)
+ line_buf[i].tokens = function.InstructionTextToken.get_instruction_lines(line.tokens)
+ core.BNSetFlowGraphNodeLines(self.handle, line_buf, len(lines))
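+ # Hedged usage sketch (comments only, not part of the original source): lines may be
+ # assigned from plain strings, token lists, or DisassemblyTextLine objects, e.g.
+ #
+ #   node.lines = ["plain text line",
+ #                 [function.InstructionTextToken(InstructionTextTokenType.TextToken, "token line")]]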
+
+ @property
+ def outgoing_edges(self):
+ """Flow graph block list of outgoing edges (read-only)"""
+ count = ctypes.c_ulonglong()
+ edges = core.BNGetFlowGraphNodeOutgoingEdges(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ branch_type = BranchType(edges[i].type)
+ target = edges[i].target
+ if target:
+ target = FlowGraphNode(self.graph, core.BNNewFlowGraphNodeReference(target))
+ points = []
+ for j in range(0, edges[i].pointCount):
+ points.append((edges[i].points[j].x, edges[i].points[j].y))
+ result.append(FlowGraphEdge(branch_type, self, target, points, edges[i].backEdge))
+ core.BNFreeFlowGraphNodeOutgoingEdgeList(edges, count.value)
+ return result
+
+ @property
+ def highlight(self):
+ """Gets or sets the highlight color for the node
+
+ :Example:
+ >>> g = FlowGraph()
+ >>> node = FlowGraphNode(g)
+ >>> node.highlight = HighlightStandardColor.BlueHighlightColor
+ >>> node.highlight
+ <color: blue>
+ """
+ return highlight.HighlightColor._from_core_struct(core.BNGetFlowGraphNodeHighlight(self.handle))
+
+ @highlight.setter
+ def highlight(self, color):
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ core.BNSetFlowGraphNodeHighlight(self.handle, color._get_core_struct())
+
+ def __repr__(self):
+ block = self.basic_block
+ if block:
+ arch = block.arch
+ if arch:
+ return "<graph node: %s@%#x-%#x>" % (arch.name, block.start, block.end)
+ else:
+ return "<graph node: %#x-%#x>" % (block.start, block.end)
+ return "<graph node>"
+
+ def __iter__(self):
+ count = ctypes.c_ulonglong()
+ lines = core.BNGetFlowGraphNodeLines(self.handle, count)
+ block = self.basic_block
+ try:
+ for i in range(0, count.value):
+ addr = lines[i].addr
+ if (lines[i].instrIndex != 0xffffffffffffffff) and (block is not None) and hasattr(block, 'il_function'):
+ il_instr = block.il_function[lines[i].instrIndex]
+ else:
+ il_instr = None
+ tokens = function.InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count)
+ yield function.DisassemblyTextLine(tokens, addr, il_instr)
+ finally:
+ core.BNFreeDisassemblyTextLines(lines, count.value)
+
+[docs] def add_outgoing_edge(self, edge_type, target):
+ """
+ ``add_outgoing_edge`` connects two flow graph nodes with an edge.
+
+ :param BranchType edge_type: Type of edge to add
+ :param FlowGraphNode target: Target node object
+ """
+ core.BNAddFlowGraphNodeOutgoingEdge(self.handle, edge_type, target.handle)
+
+
+[docs]class FlowGraphLayoutRequest(object):
+[docs] def __init__(self, graph, callback = None):
+ self.on_complete = callback
+ self._cb = ctypes.CFUNCTYPE(None, ctypes.c_void_p)(self._complete)
+ self.handle = core.BNStartFlowGraphLayout(graph.handle, None, self._cb)
+
+ def __del__(self):
+ self.abort()
+ core.BNFreeFlowGraphLayoutRequest(self.handle)
+
+ def _complete(self, ctxt):
+ try:
+ if self.on_complete is not None:
+ self.on_complete()
+ except:
+ log.log_error(traceback.format_exc())
+
+ @property
+ def complete(self):
+ """Whether flow graph layout is complete (read-only)"""
+ return core.BNIsFlowGraphLayoutRequestComplete(self.handle)
+
+ @property
+ def graph(self):
+ """Flow graph that is being processed (read-only)"""
+ return CoreFlowGraph(core.BNGetGraphForFlowGraphLayoutRequest(self.handle))
+
+
+
+
+[docs]class FlowGraph(object):
+ """
+ ``class FlowGraph`` implements a directed flow graph to be shown in the UI. This class allows plugins to
+ create custom flow graphs and render them in the UI using the flow graph report API.
+
+ An example of creating a flow graph and presenting it in the UI:
+
+ >>> graph = FlowGraph()
+ >>> node_a = FlowGraphNode(graph)
+ >>> node_a.lines = ["Node A"]
+ >>> node_b = FlowGraphNode(graph)
+ >>> node_b.lines = ["Node B"]
+ >>> node_c = FlowGraphNode(graph)
+ >>> node_c.lines = ["Node C"]
+ >>> graph.append(node_a)
+ 0
+ >>> graph.append(node_b)
+ 1
+ >>> graph.append(node_c)
+ 2
+ >>> node_a.add_outgoing_edge(BranchType.UnconditionalBranch, node_b)
+ >>> node_a.add_outgoing_edge(BranchType.UnconditionalBranch, node_c)
+ >>> show_graph_report("Custom Graph", graph)
+
+ .. note:: In the current implementation, only graphs that have a single start node where all other nodes are \
+ reachable from outgoing edges can be rendered correctly. This describes the natural limitations of a control \
+ flow graph, which is what the rendering logic was designed for. Graphs that have nodes that are only reachable \
+ from incoming edges, or graphs that have disjoint subgraphs will not render correctly. This will be fixed \
+ in a future version.
+ """
+[docs] def __init__(self, handle = None):
+ if handle is None:
+ self._ext_cb = core.BNCustomFlowGraph()
+ self._ext_cb.context = 0
+ self._ext_cb.prepareForLayout = self._ext_cb.prepareForLayout.__class__(self._prepare_for_layout)
+ self._ext_cb.populateNodes = self._ext_cb.populateNodes.__class__(self._populate_nodes)
+ self._ext_cb.completeLayout = self._ext_cb.completeLayout.__class__(self._complete_layout)
+ self._ext_cb.update = self._ext_cb.update.__class__(self._update)
+ handle = core.BNCreateCustomFlowGraph(self._ext_cb)
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeFlowGraph(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, FlowGraph):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, FlowGraph):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ def _prepare_for_layout(self, ctxt):
+ try:
+ self.prepare_for_layout()
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _populate_nodes(self, ctxt):
+ try:
+ self.populate_nodes()
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _complete_layout(self, ctxt):
+ try:
+ self.complete_layout()
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _update(self, ctxt):
+ try:
+ graph = self.update()
+ if graph is None:
+ return None
+ return ctypes.cast(core.BNNewFlowGraphReference(graph.handle), ctypes.c_void_p).value
+ except:
+ log.log_error(traceback.format_exc())
+ return None
+
+[docs] def finish_prepare_for_layout(self):
+ """
+ ``finish_prepare_for_layout`` signals that preparations for rendering a graph are complete.
+ This method should only be called by a ``prepare_for_layout`` reimplementation.
+ """
+ core.BNFinishPrepareForLayout(self.handle)
+
+[docs] def prepare_for_layout(self):
+ """
+ ``prepare_for_layout`` can be overridden by subclasses to handle preparations that must take
+ place before a flow graph is rendered, such as waiting for a function to finish analysis. If
+ this function is overridden, the ``finish_prepare_for_layout`` method must be called once
+ preparations are completed.
+ """
+ self.finish_prepare_for_layout()
+
+[docs] def populate_nodes(self):
+ """
+ ``populate_nodes`` can be overridden by subclasses to create nodes in a graph when a flow
+ graph needs to be rendered. This will happen on a worker thread and will not block the UI.
+ """
+ pass
+
+[docs] def complete_layout(self):
+ """
+ ``complete_layout`` can be overridden by subclasses and is called when a graph layout is completed.
+ """
+ pass
+
+ @property
+ def function(self):
+ """Function for a flow graph"""
+ func = core.BNGetFunctionForFlowGraph(self.handle)
+ if func is None:
+ return None
+ return function.Function(handle = func)
+
+ @function.setter
+ def function(self, func):
+ if func is not None:
+ func = func.handle
+ core.BNSetFunctionForFlowGraph(self.handle, func)
+
+ @property
+ def complete(self):
+ """Whether flow graph layout is complete (read-only)"""
+ return core.BNIsFlowGraphLayoutComplete(self.handle)
+
+ @property
+ def nodes(self):
+ """List of nodes in graph (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetFlowGraphNodes(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(FlowGraphNode(self, core.BNNewFlowGraphNodeReference(blocks[i])))
+ core.BNFreeFlowGraphNodeList(blocks, count.value)
+ return result
+
+ @property
+ def has_nodes(self):
+ """Whether the flow graph has at least one node (read-only)"""
+ return core.BNFlowGraphHasNodes(self.handle)
+
+ @property
+ def width(self):
+ """Flow graph width (read-only)"""
+ return core.BNGetFlowGraphWidth(self.handle)
+
+ @property
+ def height(self):
+ """Flow graph height (read-only)"""
+ return core.BNGetFlowGraphHeight(self.handle)
+
+ @property
+ def horizontal_block_margin(self):
+ return core.BNGetHorizontalFlowGraphBlockMargin(self.handle)
+
+ @horizontal_block_margin.setter
+ def horizontal_block_margin(self, value):
+ core.BNSetFlowGraphBlockMargins(self.handle, value, self.vertical_block_margin)
+
+ @property
+ def vertical_block_margin(self):
+ return core.BNGetVerticalFlowGraphBlockMargin(self.handle)
+
+ @vertical_block_margin.setter
+ def vertical_block_margin(self, value):
+ core.BNSetFlowGraphBlockMargins(self.handle, self.horizontal_block_margin, value)
+
+ @property
+ def is_il(self):
+ return core.BNIsILFlowGraph(self.handle)
+
+ @property
+ def is_low_level_il(self):
+ return core.BNIsLowLevelILFlowGraph(self.handle)
+
+ @property
+ def is_medium_level_il(self):
+ return core.BNIsMediumLevelILFlowGraph(self.handle)
+
+ @property
+ def il_function(self):
+ if self.is_low_level_il:
+ il_func = core.BNGetFlowGraphLowLevelILFunction(self.handle)
+ if not il_func:
+ return None
+ function = self.function
+ if function is None:
+ return None
+ return lowlevelil.LowLevelILFunction(function.arch, il_func, function)
+ if self.is_medium_level_il:
+ il_func = core.BNGetFlowGraphMediumLevelILFunction(self.handle)
+ if not il_func:
+ return None
+ function = self.function
+ if function is None:
+ return None
+ return mediumlevelil.MediumLevelILFunction(function.arch, il_func, function)
+ return None
+
+ @il_function.setter
+ def il_function(self, func):
+ if isinstance(func, lowlevelil.LowLevelILFunction):
+ core.BNSetFlowGraphLowLevelILFunction(self.handle, func.handle)
+ core.BNSetFlowGraphMediumLevelILFunction(self.handle, None)
+ elif isinstance(func, mediumlevelil.MediumLevelILFunction):
+ core.BNSetFlowGraphLowLevelILFunction(self.handle, None)
+ core.BNSetFlowGraphMediumLevelILFunction(self.handle, func.handle)
+ elif func is None:
+ core.BNSetFlowGraphLowLevelILFunction(self.handle, None)
+ core.BNSetFlowGraphMediumLevelILFunction(self.handle, None)
+ else:
+ raise TypeError("expected IL function for setting il_function property")
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ function = self.function
+ if function is None:
+ return "<flow graph>"
+ return "<graph of %s>" % repr(function)
+
+ def __iter__(self):
+ count = ctypes.c_ulonglong()
+ nodes = core.BNGetFlowGraphNodes(self.handle, count)
+ try:
+ for i in range(0, count.value):
+ yield FlowGraphNode(self, core.BNNewFlowGraphNodeReference(nodes[i]))
+ finally:
+ core.BNFreeFlowGraphNodeList(nodes, count.value)
+
+[docs] def layout(self, callback = None):
+ """
+ ``layout`` starts rendering a graph for display. Once a layout is complete, each node will contain
+ coordinates and extents that can be used to render a graph with minimum additional computation.
+ This function does not wait for the graph to be ready to display, but a callback can be provided
+ to signal when the graph is ready.
+
+ :param callable() callback: Function to be called when the graph is ready to display
+ :return: Pending flow graph layout request object
+ :rtype: FlowGraphLayoutRequest
+ """
+ return FlowGraphLayoutRequest(self, callback)
+
+ def _wait_complete(self):
+ self._wait_cond.release()
+
+[docs] def layout_and_wait(self):
+ """
+ ``layout_and_wait`` starts rendering a graph for display, and waits for the graph to be ready to
+ display. After this function returns, each node will contain coordinates and extents that can be
+ used to render a graph with minimum additional computation.
+
+ Do not use this API on the UI thread (use ``layout`` with a callback instead).
+ """
+ self._wait_cond = threading.Lock()
+
+ self._wait_cond.acquire()
+
+ request = self.layout(self._wait_complete)
+
+ self._wait_cond.acquire()
+ self._wait_cond.release()
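+ # Hedged usage sketch (comments only, not part of the original source):
+ #
+ #   graph = FlowGraph()
+ #   node = FlowGraphNode(graph)
+ #   node.lines = ["hello"]
+ #   graph.append(node)
+ #   graph.layout_and_wait()   # never call this on the UI thread
+ #   for n in graph.nodes:
+ #       print(n.x, n.y, n.width, n.height)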
+
+[docs] def get_nodes_in_region(self, left, top, right, bottom):
+ count = ctypes.c_ulonglong()
+ nodes = core.BNGetFlowGraphNodesInRegion(self.handle, left, top, right, bottom, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(FlowGraphNode(self, core.BNNewFlowGraphNodeReference(nodes[i])))
+ core.BNFreeFlowGraphNodeList(nodes, count.value)
+ return result
+
+[docs] def append(self, node):
+ """
+ ``append`` adds a node to a flow graph.
+
+ :param FlowGraphNode node: Node to add
+ :return: Index of node
+ :rtype: int
+ """
+ return core.BNAddFlowGraphNode(self.handle, node.handle)
+
+ def __getitem__(self, i):
+ node = core.BNGetFlowGraphNode(self.handle, i)
+ if node is None:
+ return None
+ return FlowGraphNode(self, node)
+
+[docs] def show(self, title):
+ """
+ ``show`` displays the graph in a new tab in the UI.
+
+ :param str title: Title to show in the new tab
+ """
+ binaryninja.interaction.show_graph_report(title, self)
+
+[docs] def update(self):
+ """
+ ``update`` can be overridden by subclasses to allow a graph to be updated after it has been
+ presented in the UI. This will automatically occur if the function referenced by the ``function``
+ property has been updated.
+
+ Return a new ``FlowGraph`` object with the new information if updates are desired. If the graph
+ does not need updating, ``None`` can be returned to leave the graph in its current state.
+
+ :return: Updated graph, or ``None``
+ :rtype: FlowGraph
+ """
+ return None
+
+
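+# Hedged example (not part of the original source): a FlowGraph subclass that builds
+# its nodes lazily in populate_nodes(), as described in the docstrings above. The node
+# contents and class name are illustrative only.
+class ExampleFlowGraph(FlowGraph):
+    def __init__(self):
+        super(ExampleFlowGraph, self).__init__()
+
+    def populate_nodes(self):
+        # Called on a worker thread whenever the graph needs to be rendered.
+        head = FlowGraphNode(self)
+        head.lines = ["entry"]
+        tail = FlowGraphNode(self)
+        tail.lines = ["exit"]
+        self.append(head)
+        self.append(tail)
+        head.add_outgoing_edge(BranchType.UnconditionalBranch, tail)
+
+    def update(self):
+        # Return a replacement FlowGraph here, or None to keep the current graph.
+        return None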
+
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from __future__ import absolute_import
+import threading
+import traceback
+import ctypes
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja import associateddatastore # Required in the main scope due to being an argument for _FunctionAssociatedDataStore
+from binaryninja import highlight
+from binaryninja import log
+from binaryninja import types
+from binaryninja.enums import (AnalysisSkipReason, FunctionGraphType, BranchType, SymbolType, InstructionTextTokenType,
+ HighlightStandardColor, HighlightColorStyle, RegisterValueType, ImplicitRegisterExtend,
+ DisassemblyOption, IntegerDisplayType, InstructionTextTokenContext, VariableSourceType,
+ FunctionAnalysisSkipOverride)
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class LookupTableEntry(object):
+[docs] def __init__(self, from_values, to_value):
+ self.from_values = from_values
+ self.to_value = to_value
+
+ def __repr__(self):
+ return "[%s] -> %#x" % (', '.join(["%#x" % i for i in self.from_values]), self.to_value)
+
+
+[docs]class RegisterValue(object):
+[docs] def __init__(self, arch = None, value = None, confidence = types.max_confidence):
+ self.is_constant = False
+ if value is None:
+ self.type = RegisterValueType.UndeterminedValue
+ else:
+ self.type = RegisterValueType(value.state)
+ if value.state == RegisterValueType.EntryValue:
+ self.arch = arch
+ if arch is not None:
+ self.reg = arch.get_reg_name(value.value)
+ else:
+ self.reg = value.value
+ elif (value.state == RegisterValueType.ConstantValue) or (value.state == RegisterValueType.ConstantPointerValue):
+ self.value = value.value
+ self.is_constant = True
+ elif value.state == RegisterValueType.StackFrameOffset:
+ self.offset = value.value
+ elif value.state == RegisterValueType.ImportedAddressValue:
+ self.value = value.value
+ self.confidence = confidence
+
+ def __repr__(self):
+ if self.type == RegisterValueType.EntryValue:
+ return "<entry %s>" % self.reg
+ if self.type == RegisterValueType.ConstantValue:
+ return "<const %#x>" % self.value
+ if self.type == RegisterValueType.ConstantPointerValue:
+ return "<const ptr %#x>" % self.value
+ if self.type == RegisterValueType.StackFrameOffset:
+ return "<stack frame offset %#x>" % self.offset
+ if self.type == RegisterValueType.ReturnAddressValue:
+ return "<return address>"
+ if self.type == RegisterValueType.ImportedAddressValue:
+ return "<imported address from entry %#x>" % self.value
+ return "<undetermined>"
+
+ def _to_api_object(self):
+ result = core.BNRegisterValue()
+ result.state = self.type
+ result.value = 0
+ if self.type == RegisterValueType.EntryValue:
+ if self.arch is not None:
+ result.value = self.arch.get_reg_index(self.reg)
+ else:
+ result.value = self.reg
+ elif (self.type == RegisterValueType.ConstantValue) or (self.type == RegisterValueType.ConstantPointerValue):
+ result.value = self.value
+ elif self.type == RegisterValueType.StackFrameOffset:
+ result.value = self.offset
+ elif self.type == RegisterValueType.ImportedAddressValue:
+ result.value = self.value
+ return result
+
+
+
+[docs] @classmethod
+ def entry_value(self, arch, reg):
+ result = RegisterValue()
+ result.type = RegisterValueType.EntryValue
+ result.arch = arch
+ result.reg = reg
+ return result
+
+[docs] @classmethod
+ def constant(self, value):
+ result = RegisterValue()
+ result.type = RegisterValueType.ConstantValue
+ result.value = value
+ result.is_constant = True
+ return result
+
+[docs] @classmethod
+ def constant_ptr(self, value):
+ result = RegisterValue()
+ result.type = RegisterValueType.ConstantPointerValue
+ result.value = value
+ result.is_constant = True
+ return result
+
+[docs] @classmethod
+ def stack_frame_offset(self, offset):
+ result = RegisterValue()
+ result.type = RegisterValueType.StackFrameOffset
+ result.offset = offset
+ return result
+
+[docs] @classmethod
+ def imported_address(self, value):
+ result = RegisterValue()
+ result.type = RegisterValueType.ImportedAddressValue
+ result.value = value
+ return result
+
+[docs] @classmethod
+ def return_address(self):
+ result = RegisterValue()
+ result.type = RegisterValueType.ReturnAddressValue
+ return result
+
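+# Hedged usage sketch (comments only, not part of the original source): the classmethod
+# constructors above build RegisterValue objects directly, e.g.
+#
+#   RegisterValue.constant(0x1000)          # repr: <const 0x1000>
+#   RegisterValue.constant_ptr(0x8000)      # repr: <const ptr 0x8000>
+#   RegisterValue.stack_frame_offset(-8)    # repr: <stack frame offset -0x8>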
+
+[docs]class ValueRange(object):
+[docs] def __init__(self, start, end, step):
+ self.start = start
+ self.end = end
+ self.step = step
+
+ def __repr__(self):
+ if self.step == 1:
+ return "<range: %#x to %#x>" % (self.start, self.end)
+ return "<range: %#x to %#x, step %#x>" % (self.start, self.end, self.step)
+
+
+[docs]class PossibleValueSet(object):
+[docs] def __init__(self, arch, value):
+ self.type = RegisterValueType(value.state)
+ if value.state == RegisterValueType.EntryValue:
+ self.reg = arch.get_reg_name(value.value)
+ elif value.state == RegisterValueType.ConstantValue:
+ self.value = value.value
+ elif value.state == RegisterValueType.ConstantPointerValue:
+ self.value = value.value
+ elif value.state == RegisterValueType.StackFrameOffset:
+ self.offset = value.value
+ elif value.state == RegisterValueType.SignedRangeValue:
+ self.offset = value.value
+ self.ranges = []
+ for i in range(0, value.count):
+ start = value.ranges[i].start
+ end = value.ranges[i].end
+ step = value.ranges[i].step
+ if start & (1 << 63):
+ start |= ~((1 << 63) - 1)
+ if end & (1 << 63):
+ end |= ~((1 << 63) - 1)
+ self.ranges.append(ValueRange(start, end, step))
+ elif value.state == RegisterValueType.UnsignedRangeValue:
+ self.offset = value.value
+ self.ranges = []
+ for i in range(0, value.count):
+ start = value.ranges[i].start
+ end = value.ranges[i].end
+ step = value.ranges[i].step
+ self.ranges.append(ValueRange(start, end, step))
+ elif value.state == RegisterValueType.LookupTableValue:
+ self.table = []
+ self.mapping = {}
+ for i in range(0, value.count):
+ from_list = []
+ for j in range(0, value.table[i].fromCount):
+ from_list.append(value.table[i].fromValues[j])
+ self.mapping[value.table[i].fromValues[j]] = value.table[i].toValue
+ self.table.append(LookupTableEntry(from_list, value.table[i].toValue))
+ elif (value.state == RegisterValueType.InSetOfValues) or (value.state == RegisterValueType.NotInSetOfValues):
+ self.values = set()
+ for i in range(0, value.count):
+ self.values.add(value.valueSet[i])
+
+ def __repr__(self):
+ if self.type == RegisterValueType.EntryValue:
+ return "<entry %s>" % self.reg
+ if self.type == RegisterValueType.ConstantValue:
+ return "<const %#x>" % self.value
+ if self.type == RegisterValueType.ConstantPointerValue:
+ return "<const ptr %#x>" % self.value
+ if self.type == RegisterValueType.StackFrameOffset:
+ return "<stack frame offset %#x>" % self.offset
+ if self.type == RegisterValueType.SignedRangeValue:
+ return "<signed ranges: %s>" % repr(self.ranges)
+ if self.type == RegisterValueType.UnsignedRangeValue:
+ return "<unsigned ranges: %s>" % repr(self.ranges)
+ if self.type == RegisterValueType.LookupTableValue:
+ return "<table: %s>" % ', '.join([repr(i) for i in self.table])
+ if self.type == RegisterValueType.InSetOfValues:
+ return "<in set(%s)>" % '[{}]'.format(', '.join(hex(i) for i in sorted(self.values)))
+ if self.type == RegisterValueType.NotInSetOfValues:
+ return "<not in set(%s)>" % '[{}]'.format(', '.join(hex(i) for i in sorted(self.values)))
+ if self.type == RegisterValueType.ReturnAddressValue:
+ return "<return address>"
+ return "<undetermined>"
+
+
+[docs]class StackVariableReference(object):
+[docs] def __init__(self, src_operand, t, name, var, ref_ofs, size):
+ self.source_operand = src_operand
+ self.type = t
+ self.name = name
+ self.var = var
+ self.referenced_offset = ref_ofs
+ self.size = size
+ if self.source_operand == 0xffffffff:
+ self.source_operand = None
+
+ def __repr__(self):
+ if self.source_operand is None:
+ if self.referenced_offset != self.var.storage:
+ return "<ref to %s%+#x>" % (self.name, self.referenced_offset - self.var.storage)
+ return "<ref to %s>" % self.name
+ if self.referenced_offset != self.var.storage:
+ return "<operand %d ref to %s%+#x>" % (self.source_operand, self.name, self.referenced_offset - self.var.storage)
+ return "<operand %d ref to %s>" % (self.source_operand, self.name)
+
+
+[docs]class Variable(object):
+[docs] def __init__(self, func, source_type, index, storage, name = None, var_type = None):
+ self.function = func
+ self.source_type = VariableSourceType(source_type)
+ self.index = index
+ self.storage = storage
+
+ var = core.BNVariable()
+ var.type = source_type
+ var.index = index
+ var.storage = storage
+ self.identifier = core.BNToVariableIdentifier(var)
+
+ if func is not None:
+ if name is None:
+ name = core.BNGetVariableName(func.handle, var)
+ if var_type is None:
+ var_type_conf = core.BNGetVariableType(func.handle, var)
+ if var_type_conf.type:
+ var_type = types.Type(var_type_conf.type, platform = func.platform, confidence = var_type_conf.confidence)
+ else:
+ var_type = None
+
+ self.name = name
+ self.type = var_type
+
+[docs] @classmethod
+ def from_identifier(self, func, identifier, name = None, var_type = None):
+ var = core.BNFromVariableIdentifier(identifier)
+ return Variable(func, VariableSourceType(var.type), var.index, var.storage, name, var_type)
+
+ def __repr__(self):
+ if self.type is None:
+ return "<var %s>" % self.name
+ return "<var %s %s%s>" % (self.type.get_string_before_name(), self.name, self.type.get_string_after_name())
+
+ def __str__(self):
+ return self.name
+
+ def __eq__(self, other):
+ if not isinstance(other, Variable):
+ return False
+ return (self.identifier, self.function) == (other.identifier, other.function)
+
+ def __hash__(self):
+ return hash((self.identifier, self.function))
+
+
+[docs]class ConstantReference(object):
+[docs] def __init__(self, val, size, ptr, intermediate):
+ self.value = val
+ self.size = size
+ self.pointer = ptr
+ self.intermediate = intermediate
+
+ def __repr__(self):
+ if self.pointer:
+ return "<constant pointer %#x>" % self.value
+ if self.size == 0:
+ return "<constant %#x>" % self.value
+ return "<constant %#x size %d>" % (self.value, self.size)
+
+
+[docs]class IndirectBranchInfo(object):
+[docs] def __init__(self, source_arch, source_addr, dest_arch, dest_addr, auto_defined):
+ self.source_arch = source_arch
+ self.source_addr = source_addr
+ self.dest_arch = dest_arch
+ self.dest_addr = dest_addr
+ self.auto_defined = auto_defined
+
+ def __repr__(self):
+ return "<branch %s:%#x -> %s:%#x>" % (self.source_arch.name, self.source_addr, self.dest_arch.name, self.dest_addr)
+
+
+[docs]class ParameterVariables(object):
+[docs] def __init__(self, var_list, confidence = types.max_confidence):
+ self.vars = var_list
+ self.confidence = confidence
+
+ def __repr__(self):
+ return repr(self.vars)
+
+ def __iter__(self):
+ for var in self.vars:
+ yield var
+
+ def __getitem__(self, idx):
+ return self.vars[idx]
+
+ def __len__(self):
+ return len(self.vars)
+
+[docs] def with_confidence(self, confidence):
+ return ParameterVariables(list(self.vars), confidence = confidence)
+
+
+class _FunctionAssociatedDataStore(associateddatastore._AssociatedDataStore):
+ _defaults = {}
+
+
+[docs]class Function(object):
+ _associated_data = {}
+
+[docs] def __init__(self, view = None, handle = None):
+ self._advanced_analysis_requests = 0
+ if handle is None:
+ self.handle = None
+ raise NotImplementedError("creation of standalone 'Function' objects is not implemented")
+ self.handle = core.handle_of_type(handle, core.BNFunction)
+ if view is None:
+ self._view = binaryninja.binaryview.BinaryView(handle = core.BNGetFunctionData(self.handle))
+ else:
+ self._view = view
+ self._arch = None
+ self._platform = None
+
+ def __del__(self):
+ if self.handle is not None:
+ if self._advanced_analysis_requests > 0:
+ core.BNReleaseAdvancedFunctionAnalysisDataMultiple(self.handle, self._advanced_analysis_requests)
+ core.BNFreeFunction(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, Function):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Function):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ def __hash__(self):
+ return hash((self.start, self.arch.name, self.platform.name))
+
+ def __getitem__(self, i):
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetFunctionBasicBlockList(self.handle, count)
+ try:
+ if i < 0:
+ i = count.value + i
+ if i < 0 or i >= count.value:
+ raise IndexError("index out of range")
+ block = binaryninja.basicblock.BasicBlock(core.BNNewBasicBlockReference(blocks[i]), self._view)
+ return block
+ finally:
+ core.BNFreeBasicBlockList(blocks, count.value)
+
+ def __iter__(self):
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetFunctionBasicBlockList(self.handle, count)
+ try:
+ for i in range(0, count.value):
+ yield binaryninja.basicblock.BasicBlock(core.BNNewBasicBlockReference(blocks[i]), self._view)
+ finally:
+ core.BNFreeBasicBlockList(blocks, count.value)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ arch = self.arch
+ if arch:
+ return "<func: %s@%#x>" % (arch.name, self.start)
+ else:
+ return "<func: %#x>" % self.start
+
+ @classmethod
+ def _unregister(cls, func):
+ handle = ctypes.cast(func, ctypes.c_void_p)
+ if handle.value in cls._associated_data:
+ del cls._associated_data[handle.value]
+
+[docs] @classmethod
+ def set_default_session_data(cls, name, value):
+ _FunctionAssociatedDataStore.set_default(name, value)
+
+ @property
+ def name(self):
+ """Symbol name for the function"""
+ return self.symbol.name
+
+ @name.setter
+ def name(self, value):
+ if value is None:
+ if self.symbol is not None:
+ self.view.undefine_user_symbol(self.symbol)
+ else:
+ symbol = types.Symbol(SymbolType.FunctionSymbol, self.start, value)
+ self.view.define_user_symbol(symbol)
+
+ @property
+ def view(self):
+ """Function view (read-only)"""
+ return self._view
+
+ @property
+ def arch(self):
+ """Function architecture (read-only)"""
+ if self._arch:
+ return self._arch
+ else:
+ arch = core.BNGetFunctionArchitecture(self.handle)
+ if arch is None:
+ return None
+ self._arch = binaryninja.architecture.CoreArchitecture._from_cache(arch)
+ return self._arch
+
+ @property
+ def platform(self):
+ """Function platform (read-only)"""
+ if self._platform:
+ return self._platform
+ else:
+ plat = core.BNGetFunctionPlatform(self.handle)
+ if plat is None:
+ return None
+ self._platform = binaryninja.platform.Platform(handle = plat)
+ return self._platform
+
+ @property
+ def start(self):
+ """Function start (read-only)"""
+ return core.BNGetFunctionStart(self.handle)
+
+ @property
+ def symbol(self):
+ """Function symbol (read-only)"""
+ sym = core.BNGetFunctionSymbol(self.handle)
+ if sym is None:
+ return None
+ return types.Symbol(None, None, None, handle = sym)
+
+ @property
+ def auto(self):
+ """Whether function was automatically discovered (read-only)"""
+ return core.BNWasFunctionAutomaticallyDiscovered(self.handle)
+
+ @property
+ def can_return(self):
+ """Whether function can return"""
+ result = core.BNCanFunctionReturn(self.handle)
+ return types.BoolWithConfidence(result.value, confidence = result.confidence)
+
+ @can_return.setter
+ def can_return(self, value):
+ bc = core.BNBoolWithConfidence()
+ bc.value = bool(value)
+ if hasattr(value, 'confidence'):
+ bc.confidence = value.confidence
+ else:
+ bc.confidence = types.max_confidence
+ core.BNSetUserFunctionCanReturn(self.handle, bc)
+
+ @property
+ def explicitly_defined_type(self):
+ """Whether function has explicitly defined types (read-only)"""
+ return core.BNFunctionHasExplicitlyDefinedType(self.handle)
+
+ @property
+ def needs_update(self):
+ """Whether the function has analysis that needs to be updated (read-only)"""
+ return core.BNIsFunctionUpdateNeeded(self.handle)
+
+ @property
+ def basic_blocks(self):
+ """List of basic blocks (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetFunctionBasicBlockList(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(binaryninja.basicblock.BasicBlock(core.BNNewBasicBlockReference(blocks[i]), self._view))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def comments(self):
+ """Dict of comments (read-only)"""
+ count = ctypes.c_ulonglong()
+ addrs = core.BNGetCommentedAddresses(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ result[addrs[i]] = self.get_comment_at(addrs[i])
+ core.BNFreeAddressList(addrs)
+ return result
+
+ @property
+ def low_level_il(self):
+ """Deprecated property provided for compatibility. Use llil instead."""
+ return binaryninja.lowlevelil.LowLevelILFunction(self.arch, core.BNGetFunctionLowLevelIL(self.handle), self)
+
+ @property
+ def llil(self):
+ """``LowLevelILFunction`` used to represent the function's low level IL (read-only)"""
+ return binaryninja.lowlevelil.LowLevelILFunction(self.arch, core.BNGetFunctionLowLevelIL(self.handle), self)
+
+ @property
+ def lifted_il(self):
+ """``LowLevelILFunction`` used to represent the function's lifted IL (read-only)"""
+ return binaryninja.lowlevelil.LowLevelILFunction(self.arch, core.BNGetFunctionLiftedIL(self.handle), self)
+
+ @property
+ def medium_level_il(self):
+ """Deprecated property provided for compatibility. Use mlil instead."""
+ return binaryninja.mediumlevelil.MediumLevelILFunction(self.arch, core.BNGetFunctionMediumLevelIL(self.handle), self)
+
+ @property
+ def mlil(self):
+ """Function medium level IL (read-only)"""
+ return binaryninja.mediumlevelil.MediumLevelILFunction(self.arch, core.BNGetFunctionMediumLevelIL(self.handle), self)
+
+ @property
+ def function_type(self):
+ """Function type object"""
+ return types.Type(core.BNGetFunctionType(self.handle), platform = self.platform)
+
+ @function_type.setter
+ def function_type(self, value):
+ self.set_user_type(value)
+
+ @property
+ def stack_layout(self):
+ """List of function stack variables (read-only)"""
+ count = ctypes.c_ulonglong()
+ v = core.BNGetStackLayout(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(Variable(self, v[i].var.type, v[i].var.index, v[i].var.storage, v[i].name,
+ types.Type(handle = core.BNNewTypeReference(v[i].type), platform = self.platform, confidence = v[i].typeConfidence)))
+ result.sort(key = lambda x: x.identifier)
+ core.BNFreeVariableNameAndTypeList(v, count.value)
+ return result
+
+ @property
+ def vars(self):
+ """List of function variables (read-only)"""
+ count = ctypes.c_ulonglong()
+ v = core.BNGetFunctionVariables(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(Variable(self, v[i].var.type, v[i].var.index, v[i].var.storage, v[i].name,
+ types.Type(handle = core.BNNewTypeReference(v[i].type), platform = self.platform, confidence = v[i].typeConfidence)))
+ result.sort(key = lambda x: x.identifier)
+ core.BNFreeVariableNameAndTypeList(v, count.value)
+ return result
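+ # Hedged usage sketch (comments only, not part of the original source):
+ #
+ #   for var in func.vars:
+ #       print(var.name, var.type, var.storage)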
+
+ @property
+ def indirect_branches(self):
+ """List of indirect branches (read-only)"""
+ count = ctypes.c_ulonglong()
+ branches = core.BNGetIndirectBranches(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(IndirectBranchInfo(binaryninja.architecture.CoreArchitecture._from_cache(branches[i].sourceArch), branches[i].sourceAddr, binaryninja.architecture.CoreArchitecture._from_cache(branches[i].destArch), branches[i].destAddr, branches[i].autoDefined))
+ core.BNFreeIndirectBranchList(branches)
+ return result
+
+ @property
+ def session_data(self):
+ """Dictionary object where plugins can store arbitrary data associated with the function"""
+ handle = ctypes.cast(self.handle, ctypes.c_void_p)
+ if handle.value not in Function._associated_data:
+ obj = _FunctionAssociatedDataStore()
+ Function._associated_data[handle.value] = obj
+ return obj
+ else:
+ return Function._associated_data[handle.value]
+
+ @property
+ def analysis_performance_info(self):
+ count = ctypes.c_ulonglong()
+ info = core.BNGetFunctionAnalysisPerformanceInfo(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ result[info[i].name] = info[i].seconds
+ core.BNFreeAnalysisPerformanceInfo(info, count.value)
+ return result
+
+ @property
+ def type_tokens(self):
+ """Text tokens for this function's prototype"""
+ return self.get_type_tokens()[0].tokens
+
+ @property
+ def return_type(self):
+ """Return type of the function"""
+ result = core.BNGetFunctionReturnType(self.handle)
+ if not result.type:
+ return None
+ return types.Type(result.type, platform = self.platform, confidence = result.confidence)
+
+ @return_type.setter
+ def return_type(self, value):
+ type_conf = core.BNTypeWithConfidence()
+ if value is None:
+ type_conf.type = None
+ type_conf.confidence = 0
+ else:
+ type_conf.type = value.handle
+ type_conf.confidence = value.confidence
+ core.BNSetUserFunctionReturnType(self.handle, type_conf)
+
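+ # Illustrative usage, not part of the module source: overriding the return
+ # type through the setter above with a type parsed by the view (``bv`` and
+ # ``current_function`` assumed from the scripting console).
+ #
+ # >>> new_type, _ = bv.parse_type_string("int64_t")
+ # >>> current_function.return_type = new_type
+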
+ @property
+ def return_regs(self):
+ """Registers that are used for the return value"""
+ result = core.BNGetFunctionReturnRegisters(self.handle)
+ reg_set = []
+ for i in range(0, result.count):
+ reg_set.append(self.arch.get_reg_name(result.regs[i]))
+ regs = types.RegisterSet(reg_set, confidence = result.confidence)
+ core.BNFreeRegisterSet(result)
+ return regs
+
+ @return_regs.setter
+ def return_regs(self, value):
+ regs = core.BNRegisterSetWithConfidence()
+ regs.regs = (ctypes.c_uint * len(value))()
+ regs.count = len(value)
+ for i in range(0, len(value)):
+ regs.regs[i] = self.arch.get_reg_index(value[i])
+ if hasattr(value, 'confidence'):
+ regs.confidence = value.confidence
+ else:
+ regs.confidence = types.max_confidence
+ core.BNSetUserFunctionReturnRegisters(self.handle, regs)
+
+ @property
+ def calling_convention(self):
+ """Calling convention used by the function"""
+ result = core.BNGetFunctionCallingConvention(self.handle)
+ if not result.convention:
+ return None
+ return binaryninja.callingconvention.CallingConvention(None, handle = result.convention, confidence = result.confidence)
+
+ @calling_convention.setter
+ def calling_convention(self, value):
+ conv_conf = core.BNCallingConventionWithConfidence()
+ if value is None:
+ conv_conf.convention = None
+ conv_conf.confidence = 0
+ else:
+ conv_conf.convention = value.handle
+ conv_conf.confidence = value.confidence
+ core.BNSetUserFunctionCallingConvention(self.handle, conv_conf)
+
+ @property
+ def parameter_vars(self):
+ """List of variables for the incoming function parameters"""
+ result = core.BNGetFunctionParameterVariables(self.handle)
+ var_list = []
+ for i in range(0, result.count):
+ var_list.append(Variable(self, result.vars[i].type, result.vars[i].index, result.vars[i].storage))
+ confidence = result.confidence
+ core.BNFreeParameterVariables(result)
+ return ParameterVariables(var_list, confidence = confidence)
+
+ @parameter_vars.setter
+ def parameter_vars(self, value):
+ if value is None:
+ var_list = []
+ else:
+ var_list = list(value)
+ var_conf = core.BNParameterVariablesWithConfidence()
+ var_conf.vars = (core.BNVariable * len(var_list))()
+ var_conf.count = len(var_list)
+ for i in range(0, len(var_list)):
+ var_conf.vars[i].type = var_list[i].source_type
+ var_conf.vars[i].index = var_list[i].index
+ var_conf.vars[i].storage = var_list[i].storage
+ if value is None:
+ var_conf.confidence = 0
+ elif hasattr(value, 'confidence'):
+ var_conf.confidence = value.confidence
+ else:
+ var_conf.confidence = types.max_confidence
+ core.BNSetUserFunctionParameterVariables(self.handle, var_conf)
+
+ @property
+ def has_variable_arguments(self):
+ """Whether the function takes a variable number of arguments"""
+ result = core.BNFunctionHasVariableArguments(self.handle)
+ return types.BoolWithConfidence(result.value, confidence = result.confidence)
+
+ @has_variable_arguments.setter
+ def has_variable_arguments(self, value):
+ bc = core.BNBoolWithConfidence()
+ bc.value = bool(value)
+ if hasattr(value, 'confidence'):
+ bc.confidence = value.confidence
+ else:
+ bc.confidence = types.max_confidence
+ core.BNSetUserFunctionHasVariableArguments(self.handle, bc)
+
+ @property
+ def stack_adjustment(self):
+ """Number of bytes removed from the stack after return"""
+ result = core.BNGetFunctionStackAdjustment(self.handle)
+ return types.SizeWithConfidence(result.value, confidence = result.confidence)
+
+ @stack_adjustment.setter
+ def stack_adjustment(self, value):
+ oc = core.BNOffsetWithConfidence()
+ oc.value = int(value)
+ if hasattr(value, 'confidence'):
+ oc.confidence = value.confidence
+ else:
+ oc.confidence = types.max_confidence
+ core.BNSetUserFunctionStackAdjustment(self.handle, oc)
+
+ @property
+ def reg_stack_adjustments(self):
+ """Number of entries removed from each register stack after return"""
+ count = ctypes.c_ulonglong()
+ adjust = core.BNGetFunctionRegisterStackAdjustments(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ name = self.arch.get_reg_stack_name(adjust[i].regStack)
+ value = types.RegisterStackAdjustmentWithConfidence(adjust[i].adjustment,
+ confidence = adjust[i].confidence)
+ result[name] = value
+ core.BNFreeRegisterStackAdjustments(adjust)
+ return result
+
+ @reg_stack_adjustments.setter
+ def reg_stack_adjustments(self, value):
+ adjust = (core.BNRegisterStackAdjustment * len(value))()
+ i = 0
+ for reg_stack in value.keys():
+ adjust[i].regStack = self.arch.get_reg_stack_index(reg_stack)
+ if isinstance(value[reg_stack], types.RegisterStackAdjustmentWithConfidence):
+ adjust[i].adjustment = value[reg_stack].value
+ adjust[i].confidence = value[reg_stack].confidence
+ else:
+ adjust[i].adjustment = value[reg_stack]
+ adjust[i].confidence = types.max_confidence
+ i += 1
+ core.BNSetUserFunctionRegisterStackAdjustments(self.handle, adjust, len(value))
+
+ @property
+ def clobbered_regs(self):
+ """Registers that are modified by this function"""
+ result = core.BNGetFunctionClobberedRegisters(self.handle)
+ reg_set = []
+ for i in range(0, result.count):
+ reg_set.append(self.arch.get_reg_name(result.regs[i]))
+ regs = types.RegisterSet(reg_set, confidence = result.confidence)
+ core.BNFreeRegisterSet(result)
+ return regs
+
+ @clobbered_regs.setter
+ def clobbered_regs(self, value):
+ regs = core.BNRegisterSetWithConfidence()
+ regs.regs = (ctypes.c_uint * len(value))()
+ regs.count = len(value)
+ for i in range(0, len(value)):
+ regs.regs[i] = self.arch.get_reg_index(value[i])
+ if hasattr(value, 'confidence'):
+ regs.confidence = value.confidence
+ else:
+ regs.confidence = types.max_confidence
+ core.BNSetUserFunctionClobberedRegisters(self.handle, regs)
+
+ @property
+ def global_pointer_value(self):
+ """Discovered value of the global pointer register, if the function uses one (read-only)"""
+ result = core.BNGetFunctionGlobalPointerValue(self.handle)
+ return RegisterValue(self.arch, result.value, confidence = result.confidence)
+
+ @property
+ def comment(self):
+ """Gets the comment for the current function"""
+ return core.BNGetFunctionComment(self.handle)
+
+ @comment.setter
+ def comment(self, comment):
+ """Sets a comment for the current function"""
+ return core.BNSetFunctionComment(self.handle, comment)
+
+ @property
+ def llil_basic_blocks(self):
+ """A generator of all LowLevelILBasicBlock objects in the current function"""
+ for block in self.llil:
+ yield block
+
+ @property
+ def mlil_basic_blocks(self):
+ """A generator of all MediumLevelILBasicBlock objects in the current function"""
+ for block in self.mlil:
+ yield block
+
+ @property
+ def instructions(self):
+ """A generator of instruction tokens and their start addresses for the current function"""
+ for block in self.basic_blocks:
+ start = block.start
+ for i in block:
+ yield (i[0], start)
+ start += i[1]
+
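+ # Illustrative usage, not part of the module source: each generated item is a
+ # (token list, address) pair, so the disassembly text can be rebuilt by
+ # joining the tokens.
+ #
+ # >>> for tokens, addr in current_function.instructions:
+ # ...     print(hex(addr), "".join(str(t) for t in tokens))
+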
+ @property
+ def llil_instructions(self):
+ """Deprecated method provided for compatibility. Use llil.instructions instead. Was: A generator of llil instructions of the current function"""
+ return self.llil.instructions
+
+ @property
+ def mlil_instructions(self):
+ """Deprecated method provided for compatibility. Use mlil.instructions instead. Was: A generator of mlil instructions of the current function"""
+ return self.mlil.instructions
+
+ @property
+ def too_large(self):
+ """Whether the function is too large to automatically perform analysis (read-only)"""
+ return core.BNIsFunctionTooLarge(self.handle)
+
+ @property
+ def analysis_skipped(self):
+ """Whether automatic analysis was skipped for this function"""
+ return core.BNIsFunctionAnalysisSkipped(self.handle)
+
+ @property
+ def analysis_skip_reason(self):
+ """Function analysis skip reason"""
+ return AnalysisSkipReason(core.BNGetAnalysisSkipReason(self.handle))
+
+ @analysis_skipped.setter
+ def analysis_skipped(self, skip):
+ if skip:
+ core.BNSetFunctionAnalysisSkipOverride(self.handle, FunctionAnalysisSkipOverride.AlwaysSkipFunctionAnalysis)
+ else:
+ core.BNSetFunctionAnalysisSkipOverride(self.handle, FunctionAnalysisSkipOverride.NeverSkipFunctionAnalysis)
+
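+ # Illustrative usage, not part of the module source: forcing analysis of a
+ # function that was skipped (for example because it was too large).
+ #
+ # >>> if current_function.analysis_skipped:
+ # ...     print(current_function.analysis_skip_reason)
+ # ...     current_function.analysis_skipped = False
+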
+ @property
+ def analysis_skip_override(self):
+ """Override for skipping of automatic analysis"""
+ return FunctionAnalysisSkipOverride(core.BNGetFunctionAnalysisSkipOverride(self.handle))
+
+ @analysis_skip_override.setter
+ def analysis_skip_override(self, override):
+ core.BNSetFunctionAnalysisSkipOverride(self.handle, override)
+
+ @property
+ def unresolved_stack_adjustment_graph(self):
+ """Flow graph of unresolved stack adjustments (read-only)"""
+ graph = core.BNGetUnresolvedStackAdjustmentGraph(self.handle)
+ if not graph:
+ return None
+ return binaryninja.flowgraph.CoreFlowGraph(graph)
+
+
+
+
+
+[docs] def set_comment(self, addr, comment):
+ """Deprecated method provided for compatibility. Use set_comment_at instead."""
+ core.BNSetCommentForAddress(self.handle, addr, comment)
+
+[docs] def set_comment_at(self, addr, comment):
+ """
+ ``set_comment_at`` sets a comment for the current function at the address specified
+
+ :param int addr: virtual address within the current function to apply the comment to
+ :param str comment: string comment to apply
+ :rtype: None
+ :Example:
+
+ >>> current_function.set_comment_at(here, "hi")
+
+ """
+ core.BNSetCommentForAddress(self.handle, addr, comment)
+
+[docs] def get_low_level_il_at(self, addr, arch=None):
+ """
+ ``get_low_level_il_at`` gets the LowLevelILInstruction corresponding to the given virtual address
+
+ :param int addr: virtual address of the function to be queried
+ :param Architecture arch: (optional) Architecture for the given function
+ :rtype: LowLevelILInstruction
+ :Example:
+
+ >>> func = bv.functions[0]
+ >>> func.get_low_level_il_at(func.start)
+ <il: push(rbp)>
+ """
+ if arch is None:
+ arch = self.arch
+
+ idx = core.BNGetLowLevelILForInstruction(self.handle, arch.handle, addr)
+
+ if idx == len(self.llil):
+ return None
+
+ return self.llil[idx]
+
+[docs] def get_low_level_il_exits_at(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ exits = core.BNGetLowLevelILExitsForInstruction(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(exits[i])
+ core.BNFreeILInstructionList(exits)
+ return result
+
+[docs] def get_reg_value_at(self, addr, reg, arch=None):
+ """
+ ``get_reg_value_at`` gets the value of the given register (specified by name) at the given virtual address
+
+ :param int addr: virtual address of the instruction to query
+ :param str reg: string value of native register to query
+ :param Architecture arch: (optional) Architecture for the given function
+ :rtype: binaryninja.function.RegisterValue
+ :Example:
+
+ >>> func.get_reg_value_at(0x400dbe, 'rdi')
+ <const 0x2>
+ """
+ if arch is None:
+ arch = self.arch
+ reg = arch.get_reg_index(reg)
+ value = core.BNGetRegisterValueAtInstruction(self.handle, arch.handle, addr, reg)
+ result = RegisterValue(arch, value)
+ return result
+
+[docs] def get_reg_value_after(self, addr, reg, arch=None):
+ """
+ ``get_reg_value_after`` gets the value of the given register (specified by name) after the instruction at the given virtual address
+
+ :param int addr: virtual address of the instruction to query
+ :param str reg: string value of native register to query
+ :param Architecture arch: (optional) Architecture for the given function
+ :rtype: binaryninja.function.RegisterValue
+ :Example:
+
+ >>> func.get_reg_value_after(0x400dbe, 'rdi')
+ <undetermined>
+ """
+ if arch is None:
+ arch = self.arch
+ reg = arch.get_reg_index(reg)
+ value = core.BNGetRegisterValueAfterInstruction(self.handle, arch.handle, addr, reg)
+ result = RegisterValue(arch, value)
+ return result
+
+[docs] def get_stack_contents_at(self, addr, offset, size, arch=None):
+ """
+ ``get_stack_contents_at`` returns the RegisterValue for the item on the stack in the current function at the
+ given virtual address ``addr``, stack offset ``offset`` and size of ``size``. Optionally specifying the architecture.
+
+ :param int addr: virtual address of the instruction to query
+ :param int offset: stack offset relative to the stack base of the function
+ :param int size: size of memory to query
+ :param Architecture arch: (optional) Architecture for the given function
+ :rtype: binaryninja.function.RegisterValue
+
+ .. note:: Stack base is zero on entry into the function unless the architecture places the return address on the
+ stack, as on x86/x86_64, where the stack base will start at ``address_size``.
+
+ :Example:
+
+ >>> func.get_stack_contents_at(0x400fad, -16, 4)
+ <range: 0x8 to 0xffffffff>
+ """
+ if arch is None:
+ arch = self.arch
+ value = core.BNGetStackContentsAtInstruction(self.handle, arch.handle, addr, offset, size)
+ result = RegisterValue(arch, value)
+ return result
+
+[docs] def get_stack_contents_after(self, addr, offset, size, arch=None):
+ if arch is None:
+ arch = self.arch
+ value = core.BNGetStackContentsAfterInstruction(self.handle, arch.handle, addr, offset, size)
+ result = RegisterValue(arch, value)
+ return result
+
+[docs] def get_parameter_at(self, addr, func_type, i, arch=None):
+ if arch is None:
+ arch = self.arch
+ if func_type is not None:
+ func_type = func_type.handle
+ value = core.BNGetParameterValueAtInstruction(self.handle, arch.handle, addr, func_type, i)
+ result = RegisterValue(arch, value)
+ return result
+
+[docs] def get_parameter_at_low_level_il_instruction(self, instr, func_type, i):
+ if func_type is not None:
+ func_type = func_type.handle
+ value = core.BNGetParameterValueAtLowLevelILInstruction(self.handle, instr, func_type, i)
+ result = RegisterValue(self.arch, value)
+ return result
+
+[docs] def get_regs_read_by(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetRegistersReadByInstruction(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs)
+ return result
+
+[docs] def get_regs_written_by(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ regs = core.BNGetRegistersWrittenByInstruction(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(arch.get_reg_name(regs[i]))
+ core.BNFreeRegisterList(regs)
+ return result
+
+[docs] def get_stack_vars_referenced_by(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ refs = core.BNGetStackVariablesReferencedByInstruction(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ var_type = types.Type(core.BNNewTypeReference(refs[i].type), platform = self.platform, confidence = refs[i].typeConfidence)
+ result.append(StackVariableReference(refs[i].sourceOperand, var_type,
+ refs[i].name, Variable.from_identifier(self, refs[i].varIdentifier, refs[i].name, var_type),
+ refs[i].referencedOffset, refs[i].size))
+ core.BNFreeStackVariableReferenceList(refs, count.value)
+ return result
+
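+ # Illustrative usage, not part of the module source: inspecting the stack
+ # variable references made by the instruction at the current address
+ # (``here`` assumed from the scripting console).
+ #
+ # >>> for ref in current_function.get_stack_vars_referenced_by(here):
+ # ...     print(ref.name, ref.referenced_offset, ref.type)
+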
+[docs] def get_constants_referenced_by(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ refs = core.BNGetConstantsReferencedByInstruction(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(ConstantReference(refs[i].value, refs[i].size, refs[i].pointer, refs[i].intermediate))
+ core.BNFreeConstantReferenceList(refs)
+ return result
+
+[docs] def get_lifted_il_at(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+
+ idx = core.BNGetLiftedILForInstruction(self.handle, arch.handle, addr)
+
+ if idx == len(self.lifted_il):
+ return None
+
+ return self.lifted_il[idx]
+
+[docs] def get_lifted_il_flag_uses_for_definition(self, i, flag):
+ flag = self.arch.get_flag_index(flag)
+ count = ctypes.c_ulonglong()
+ instrs = core.BNGetLiftedILFlagUsesForDefinition(self.handle, i, flag, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_lifted_il_flag_definitions_for_use(self, i, flag):
+ flag = self.arch.get_flag_index(flag)
+ count = ctypes.c_ulonglong()
+ instrs = core.BNGetLiftedILFlagDefinitionsForUse(self.handle, i, flag, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_flags_read_by_lifted_il_instruction(self, i):
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetFlagsReadByLiftedILInstruction(self.handle, i, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(self.arch._flags_by_index[flags[i]])
+ core.BNFreeRegisterList(flags)
+ return result
+
+[docs] def get_flags_written_by_lifted_il_instruction(self, i):
+ count = ctypes.c_ulonglong()
+ flags = core.BNGetFlagsWrittenByLiftedILInstruction(self.handle, i, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(self.arch._flags_by_index[flags[i]])
+ core.BNFreeRegisterList(flags)
+ return result
+
+[docs] def create_graph(self, graph_type = FunctionGraphType.NormalFunctionGraph, settings = None):
+ if settings is not None:
+ settings_obj = settings.handle
+ else:
+ settings_obj = None
+ return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateFunctionGraph(self.handle, graph_type, settings_obj))
+
+
+
+[docs] def apply_auto_discovered_type(self, func_type):
+ core.BNApplyAutoDiscoveredFunctionType(self.handle, func_type.handle)
+
+[docs] def set_auto_indirect_branches(self, source, branches, source_arch=None):
+ if source_arch is None:
+ source_arch = self.arch
+ branch_list = (core.BNArchitectureAndAddress * len(branches))()
+ for i in range(len(branches)):
+ branch_list[i].arch = branches[i][0].handle
+ branch_list[i].address = branches[i][1]
+ core.BNSetAutoIndirectBranches(self.handle, source_arch.handle, source, branch_list, len(branches))
+
+[docs] def set_user_indirect_branches(self, source, branches, source_arch=None):
+ if source_arch is None:
+ source_arch = self.arch
+ branch_list = (core.BNArchitectureAndAddress * len(branches))()
+ for i in range(len(branches)):
+ branch_list[i].arch = branches[i][0].handle
+ branch_list[i].address = branches[i][1]
+ core.BNSetUserIndirectBranches(self.handle, source_arch.handle, source, branch_list, len(branches))
+
+[docs] def get_indirect_branches_at(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ branches = core.BNGetIndirectBranchesAt(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(count.value):
+ result.append(IndirectBranchInfo(binaryninja.architecture.CoreArchitecture._from_cache(branches[i].sourceArch), branches[i].sourceAddr, binaryninja.architecture.CoreArchitecture._from_cache(branches[i].destArch), branches[i].destAddr, branches[i].autoDefined))
+ core.BNFreeIndirectBranchList(branches)
+ return result
+
+[docs] def get_block_annotations(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong(0)
+ lines = core.BNGetFunctionBlockAnnotations(self.handle, arch.handle, addr, count)
+ result = []
+ for i in range(count.value):
+ result.append(InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count))
+ core.BNFreeInstructionTextLines(lines, count.value)
+ return result
+
+
+
+
+
+[docs] def set_auto_return_type(self, value):
+ type_conf = core.BNTypeWithConfidence()
+ if value is None:
+ type_conf.type = None
+ type_conf.confidence = 0
+ else:
+ type_conf.type = value.handle
+ type_conf.confidence = value.confidence
+ core.BNSetAutoFunctionReturnType(self.handle, type_conf)
+
+[docs] def set_auto_return_regs(self, value):
+ regs = core.BNRegisterSetWithConfidence()
+ regs.regs = (ctypes.c_uint * len(value))()
+ regs.count = len(value)
+ for i in range(0, len(value)):
+ regs.regs[i] = self.arch.get_reg_index(value[i])
+ if hasattr(value, 'confidence'):
+ regs.confidence = value.confidence
+ else:
+ regs.confidence = types.max_confidence
+ core.BNSetAutoFunctionReturnRegisters(self.handle, regs)
+
+[docs] def set_auto_calling_convention(self, value):
+ conv_conf = core.BNCallingConventionWithConfidence()
+ if value is None:
+ conv_conf.convention = None
+ conv_conf.confidence = 0
+ else:
+ conv_conf.convention = value.handle
+ conv_conf.confidence = value.confidence
+ core.BNSetAutoFunctionCallingConvention(self.handle, conv_conf)
+
+[docs] def set_auto_parameter_vars(self, value):
+ if value is None:
+ var_list = []
+ else:
+ var_list = list(value)
+ var_conf = core.BNParameterVariablesWithConfidence()
+ var_conf.vars = (core.BNVariable * len(var_list))()
+ var_conf.count = len(var_list)
+ for i in range(0, len(var_list)):
+ var_conf.vars[i].type = var_list[i].source_type
+ var_conf.vars[i].index = var_list[i].index
+ var_conf.vars[i].storage = var_list[i].storage
+ if value is None:
+ var_conf.confidence = 0
+ elif hasattr(value, 'confidence'):
+ var_conf.confidence = value.confidence
+ else:
+ var_conf.confidence = types.max_confidence
+ core.BNSetAutoFunctionParameterVariables(self.handle, var_conf)
+
+[docs] def set_auto_has_variable_arguments(self, value):
+ bc = core.BNBoolWithConfidence()
+ bc.value = bool(value)
+ if hasattr(value, 'confidence'):
+ bc.confidence = value.confidence
+ else:
+ bc.confidence = types.max_confidence
+ core.BNSetAutoFunctionHasVariableArguments(self.handle, bc)
+
+[docs] def set_auto_can_return(self, value):
+ bc = core.BNBoolWithConfidence()
+ bc.value = bool(value)
+ if hasattr(value, 'confidence'):
+ bc.confidence = value.confidence
+ else:
+ bc.confidence = types.max_confidence
+ core.BNSetAutoFunctionCanReturn(self.handle, bc)
+
+[docs] def set_auto_stack_adjustment(self, value):
+ oc = core.BNOffsetWithConfidence()
+ oc.value = int(value)
+ if hasattr(value, 'confidence'):
+ oc.confidence = value.confidence
+ else:
+ oc.confidence = types.max_confidence
+ core.BNSetAutoFunctionStackAdjustment(self.handle, oc)
+
+[docs] def set_auto_reg_stack_adjustments(self, value):
+ adjust = (core.BNRegisterStackAdjustment * len(value))()
+ i = 0
+ for reg_stack in value.keys():
+ adjust[i].regStack = self.arch.get_reg_stack_index(reg_stack)
+ if isinstance(value[reg_stack], types.RegisterStackAdjustmentWithConfidence):
+ adjust[i].adjustment = value[reg_stack].value
+ adjust[i].confidence = value[reg_stack].confidence
+ else:
+ adjust[i].adjustment = value[reg_stack]
+ adjust[i].confidence = types.max_confidence
+ i += 1
+ core.BNSetAutoFunctionRegisterStackAdjustments(self.handle, adjust, len(value))
+
+[docs] def set_auto_clobbered_regs(self, value):
+ regs = core.BNRegisterSetWithConfidence()
+ regs.regs = (ctypes.c_uint * len(value))()
+ regs.count = len(value)
+ for i in range(0, len(value)):
+ regs.regs[i] = self.arch.get_reg_index(value[i])
+ if hasattr(value, 'confidence'):
+ regs.confidence = value.confidence
+ else:
+ regs.confidence = types.max_confidence
+ core.BNSetAutoFunctionClobberedRegisters(self.handle, regs)
+
+[docs] def get_int_display_type(self, instr_addr, value, operand, arch=None):
+ if arch is None:
+ arch = self.arch
+ return IntegerDisplayType(core.BNGetIntegerConstantDisplayType(self.handle, arch.handle, instr_addr, value, operand))
+
+[docs] def set_int_display_type(self, instr_addr, value, operand, display_type, arch=None):
+ """
+
+ :param int instr_addr:
+ :param int value:
+ :param int operand:
+ :param enums.IntegerDisplayType display_type:
+ :param Architecture arch: (optional)
+ """
+ if arch is None:
+ arch = self.arch
+ if isinstance(display_type, str):
+ display_type = IntegerDisplayType[display_type]
+ core.BNSetIntegerConstantDisplayType(self.handle, arch.handle, instr_addr, value, operand, display_type)
+
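+ # Illustrative usage, not part of the module source: changing how the first
+ # constant operand of the instruction at ``here`` is rendered. The method
+ # above accepts the enum member name as a string; the specific member name
+ # and operand index used here are assumptions for the example.
+ #
+ # >>> const = current_function.get_constants_referenced_by(here)[0]
+ # >>> current_function.set_int_display_type(here, const.value, 0,
+ # ...     "UnsignedHexadecimalDisplayType")
+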
+[docs] def reanalyze(self):
+ """
+ ``reanalyze`` causes this function to be reanalyzed. This function does not wait for the analysis to finish.
+
+ :rtype: None
+ """
+ core.BNReanalyzeFunction(self.handle)
+
+[docs] def request_advanced_analysis_data(self):
+ core.BNRequestAdvancedFunctionAnalysisData(self.handle)
+ self._advanced_analysis_requests += 1
+
+[docs] def release_advanced_analysis_data(self):
+ core.BNReleaseAdvancedFunctionAnalysisData(self.handle)
+ self._advanced_analysis_requests -= 1
+
+[docs] def get_basic_block_at(self, addr, arch=None):
+ """
+ ``get_basic_block_at`` returns the BasicBlock of the optionally specified Architecture ``arch`` at the given
+ address ``addr``.
+
+ :param int addr: Address of the BasicBlock to retrieve.
+ :param Architecture arch: (optional) Architecture of the basic block if different from the Function's self.arch
+ :Example:
+ >>> current_function.get_basic_block_at(current_function.start)
+ <block: x86_64@0x100000f30-0x100000f50>
+ """
+ if arch is None:
+ arch = self.arch
+ block = core.BNGetFunctionBasicBlockAtAddress(self.handle, arch.handle, addr)
+ if not block:
+ return None
+ return binaryninja.basicblock.BasicBlock(block, self._view)
+
+[docs] def get_instr_highlight(self, addr, arch=None):
+ """
+ :Example:
+ >>> current_function.set_user_instr_highlight(here, highlight.HighlightColor(red=0xff, blue=0xff, green=0))
+ >>> current_function.get_instr_highlight(here)
+ <color: #ff00ff>
+ """
+ if arch is None:
+ arch = self.arch
+ color = core.BNGetInstructionHighlight(self.handle, arch.handle, addr)
+ if color.style == HighlightColorStyle.StandardHighlightColor:
+ return highlight.HighlightColor(color = color.color, alpha = color.alpha)
+ elif color.style == HighlightColorStyle.MixedHighlightColor:
+ return highlight.HighlightColor(color = color.color, mix_color = color.mixColor, mix = color.mix, alpha = color.alpha)
+ elif color.style == HighlightColorStyle.CustomHighlightColor:
+ return highlight.HighlightColor(red = color.r, green = color.g, blue = color.b, alpha = color.alpha)
+ return highlight.HighlightColor(color = HighlightStandardColor.NoHighlightColor)
+
+[docs] def set_auto_instr_highlight(self, addr, color, arch=None):
+ """
+ ``set_auto_instr_highlight`` highlights the instruction at the specified address with the supplied color
+
+ .. warning:: Use only in analysis plugins. Do not use in regular plugins, as colors won't be saved to the database.
+
+ :param int addr: virtual address of the instruction to be highlighted
+ :param HighlightStandardColor or highlight.HighlightColor color: Color value to use for highlighting
+ :param Architecture arch: (optional) Architecture of the instruction if different from self.arch
+ """
+ if arch is None:
+ arch = self.arch
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color = color)
+ core.BNSetAutoInstructionHighlight(self.handle, arch.handle, addr, color._get_core_struct())
+
+[docs] def set_user_instr_highlight(self, addr, color, arch=None):
+ """
+ ``set_user_instr_highlight`` highlights the instruction at the specified address with the supplied color
+
+ :param int addr: virtual address of the instruction to be highlighted
+ :param HighlightStandardColor or highlight.HighlightColor color: Color value to use for highlighting
+ :param Architecture arch: (optional) Architecture of the instruction if different from self.arch
+ :Example:
+
+ >>> current_function.set_user_instr_highlight(here, HighlightStandardColor.BlueHighlightColor)
+ >>> current_function.set_user_instr_highlight(here, highlight.HighlightColor(red=0xff, blue=0xff, green=0))
+ """
+ if arch is None:
+ arch = self.arch
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ core.BNSetUserInstructionHighlight(self.handle, arch.handle, addr, color._get_core_struct())
+
+[docs] def create_auto_stack_var(self, offset, var_type, name):
+ tc = core.BNTypeWithConfidence()
+ tc.type = var_type.handle
+ tc.confidence = var_type.confidence
+ core.BNCreateAutoStackVariable(self.handle, offset, tc, name)
+
+[docs] def create_user_stack_var(self, offset, var_type, name):
+ tc = core.BNTypeWithConfidence()
+ tc.type = var_type.handle
+ tc.confidence = var_type.confidence
+ core.BNCreateUserStackVariable(self.handle, offset, tc, name)
+
+[docs] def delete_auto_stack_var(self, offset):
+ core.BNDeleteAutoStackVariable(self.handle, offset)
+
+[docs] def delete_user_stack_var(self, offset):
+ core.BNDeleteUserStackVariable(self.handle, offset)
+
+[docs] def create_auto_var(self, var, var_type, name, ignore_disjoint_uses = False):
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ tc = core.BNTypeWithConfidence()
+ tc.type = var_type.handle
+ tc.confidence = var_type.confidence
+ core.BNCreateAutoVariable(self.handle, var_data, tc, name, ignore_disjoint_uses)
+
+[docs] def create_user_var(self, var, var_type, name, ignore_disjoint_uses = False):
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ tc = core.BNTypeWithConfidence()
+ tc.type = var_type.handle
+ tc.confidence = var_type.confidence
+ core.BNCreateUserVariable(self.handle, var_data, tc, name, ignore_disjoint_uses)
+
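+ # Illustrative usage, not part of the module source: retyping and renaming an
+ # existing variable (``bv`` and ``current_function`` assumed from the console;
+ # the chosen name and type are arbitrary).
+ #
+ # >>> var = current_function.vars[0]
+ # >>> new_type, _ = bv.parse_type_string("char*")
+ # >>> current_function.create_user_var(var, new_type, "buffer")
+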
+[docs] def delete_auto_var(self, var):
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ core.BNDeleteAutoVariable(self.handle, var_data)
+
+[docs] def delete_user_var(self, var):
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ core.BNDeleteUserVariable(self.handle, var_data)
+
+[docs] def get_stack_var_at_frame_offset(self, offset, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ found_var = core.BNVariableNameAndType()
+ if not core.BNGetStackVariableAtFrameOffset(self.handle, arch.handle, addr, offset, found_var):
+ return None
+ result = Variable(self, found_var.var.type, found_var.var.index, found_var.var.storage,
+ found_var.name, types.Type(handle = core.BNNewTypeReference(found_var.type), platform = self.platform,
+ confidence = found_var.typeConfidence))
+ core.BNFreeVariableNameAndType(found_var)
+ return result
+
+[docs] def get_type_tokens(self, settings=None):
+ if settings is not None:
+ settings = settings.handle
+ count = ctypes.c_ulonglong()
+ lines = core.BNGetFunctionTypeTokens(self.handle, settings, count)
+ result = []
+ for i in range(0, count.value):
+ addr = lines[i].addr
+ color = highlight.HighlightColor._from_core_struct(lines[i].highlight)
+ tokens = InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count)
+ result.append(DisassemblyTextLine(tokens, addr, color = color))
+ core.BNFreeDisassemblyTextLines(lines, count.value)
+ return result
+
+[docs] def get_reg_value_at_exit(self, reg):
+ result = core.BNGetFunctionRegisterValueAtExit(self.handle, self.arch.get_reg_index(reg))
+ return RegisterValue(self.arch, result.value, confidence = result.confidence)
+
+[docs] def set_auto_call_stack_adjustment(self, addr, adjust, arch=None):
+ if arch is None:
+ arch = self.arch
+ if not isinstance(adjust, types.SizeWithConfidence):
+ adjust = types.SizeWithConfidence(adjust)
+ core.BNSetAutoCallStackAdjustment(self.handle, arch.handle, addr, adjust.value, adjust.confidence)
+
+[docs] def set_auto_call_reg_stack_adjustment(self, addr, adjust, arch=None):
+ if arch is None:
+ arch = self.arch
+ adjust_buf = (core.BNRegisterStackAdjustment * len(adjust))()
+ i = 0
+ for reg_stack in adjust.keys():
+ adjust_buf[i].regStack = arch.get_reg_stack_index(reg_stack)
+ value = adjust[reg_stack]
+ if not isinstance(value, types.RegisterStackAdjustmentWithConfidence):
+ value = types.RegisterStackAdjustmentWithConfidence(value)
+ adjust_buf[i].adjustment = value.value
+ adjust_buf[i].confidence = value.confidence
+ i += 1
+ core.BNSetAutoCallRegisterStackAdjustment(self.handle, arch.handle, addr, adjust_buf, len(adjust))
+
+[docs] def set_auto_call_reg_stack_adjustment_for_reg_stack(self, addr, reg_stack, adjust, arch=None):
+ if arch is None:
+ arch = self.arch
+ reg_stack = arch.get_reg_stack_index(reg_stack)
+ if not isinstance(adjust, types.RegisterStackAdjustmentWithConfidence):
+ adjust = types.RegisterStackAdjustmentWithConfidence(adjust)
+ core.BNSetAutoCallRegisterStackAdjustmentForRegisterStack(self.handle, arch.handle, addr, reg_stack,
+ adjust.value, adjust.confidence)
+
+[docs] def set_call_stack_adjustment(self, addr, adjust, arch=None):
+ if arch is None:
+ arch = self.arch
+ if not isinstance(adjust, types.SizeWithConfidence):
+ adjust = types.SizeWithConfidence(adjust)
+ core.BNSetUserCallStackAdjustment(self.handle, arch.handle, addr, adjust.value, adjust.confidence)
+
+[docs] def set_call_reg_stack_adjustment(self, addr, adjust, arch=None):
+ if arch is None:
+ arch = self.arch
+ adjust_buf = (core.BNRegisterStackAdjustment * len(adjust))()
+ i = 0
+ for reg_stack in adjust.keys():
+ adjust_buf[i].regStack = arch.get_reg_stack_index(reg_stack)
+ value = adjust[reg_stack]
+ if not isinstance(value, types.RegisterStackAdjustmentWithConfidence):
+ value = types.RegisterStackAdjustmentWithConfidence(value)
+ adjust_buf[i].adjustment = value.value
+ adjust_buf[i].confidence = value.confidence
+ i += 1
+ core.BNSetUserCallRegisterStackAdjustment(self.handle, arch.handle, addr, adjust_buf, len(adjust))
+
+[docs] def set_call_reg_stack_adjustment_for_reg_stack(self, addr, reg_stack, adjust, arch=None):
+ if arch is None:
+ arch = self.arch
+ reg_stack = arch.get_reg_stack_index(reg_stack)
+ if not isinstance(adjust, types.RegisterStackAdjustmentWithConfidence):
+ adjust = types.RegisterStackAdjustmentWithConfidence(adjust)
+ core.BNSetUserCallRegisterStackAdjustmentForRegisterStack(self.handle, arch.handle, addr, reg_stack,
+ adjust.value, adjust.confidence)
+
+[docs] def get_call_stack_adjustment(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ result = core.BNGetCallStackAdjustment(self.handle, arch.handle, addr)
+ return types.SizeWithConfidence(result.value, confidence = result.confidence)
+
+[docs] def get_call_reg_stack_adjustment(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ count = ctypes.c_ulonglong()
+ adjust = core.BNGetCallRegisterStackAdjustment(self.handle, arch.handle, addr, count)
+ result = {}
+ for i in range(0, count.value):
+ result[arch.get_reg_stack_name(adjust[i].regStack)] = types.RegisterStackAdjustmentWithConfidence(
+ adjust[i].adjustment, confidence = adjust[i].confidence)
+ core.BNFreeRegisterStackAdjustments(adjust)
+ return result
+
+[docs] def get_call_reg_stack_adjustment_for_reg_stack(self, addr, reg_stack, arch=None):
+ if arch is None:
+ arch = self.arch
+ reg_stack = arch.get_reg_stack_index(reg_stack)
+ adjust = core.BNGetCallRegisterStackAdjustmentForRegisterStack(self.handle, arch.handle, addr, reg_stack)
+ result = types.RegisterStackAdjustmentWithConfidence(adjust.adjustment, confidence = adjust.confidence)
+ return result
+
+[docs] def is_call_instruction(self, addr, arch=None):
+ if arch is None:
+ arch = self.arch
+ return core.BNIsCallInstruction(self.handle, arch.handle, addr)
+
+[docs] def request_debug_report(self, name):
+ core.BNRequestFunctionDebugReport(self.handle, name)
+ self.view.update_analysis()
+
+
+[docs]class AdvancedFunctionAnalysisDataRequestor(object):
+[docs] def __init__(self, func = None):
+ self._function = func
+ if self._function is not None:
+ self._function.request_advanced_analysis_data()
+
+ def __del__(self):
+ if self._function is not None:
+ self._function.release_advanced_analysis_data()
+
+ @property
+ def function(self):
+ return self._function
+
+ @function.setter
+ def function(self, func):
+ if self._function is not None:
+ self._function.release_advanced_analysis_data()
+ self._function = func
+ if self._function is not None:
+ self._function.request_advanced_analysis_data()
+
+[docs] def close(self):
+ if self._function is not None:
+ self._function.release_advanced_analysis_data()
+ self._function = None
+
+
+[docs]class DisassemblyTextLine(object):
+[docs] def __init__(self, tokens, address = None, il_instr = None, color = None):
+ self.address = address
+ self.tokens = tokens
+ self.il_instruction = il_instr
+ if color is None:
+ self.highlight = highlight.HighlightColor()
+ else:
+ if not isinstance(color, HighlightStandardColor) and not isinstance(color, highlight.HighlightColor):
+ raise ValueError("Specified color is not one of HighlightStandardColor, highlight.HighlightColor")
+ if isinstance(color, HighlightStandardColor):
+ color = highlight.HighlightColor(color)
+ self.highlight = color
+
+ def __str__(self):
+ result = ""
+ for token in self.tokens:
+ result += token.text
+ return result
+
+ def __repr__(self):
+ if self.address is None:
+ return str(self)
+ return "<%#x: %s>" % (self.address, str(self))
+
+
+[docs]class DisassemblySettings(object):
+[docs] def __init__(self, handle = None):
+ if handle is None:
+ self.handle = core.BNCreateDisassemblySettings()
+ else:
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeDisassemblySettings(self.handle)
+
+ @property
+ def width(self):
+ return core.BNGetDisassemblyWidth(self.handle)
+
+ @width.setter
+ def width(self, value):
+ core.BNSetDisassemblyWidth(self.handle, value)
+
+ @property
+ def max_symbol_width(self):
+ return core.BNGetDisassemblyMaximumSymbolWidth(self.handle)
+
+ @max_symbol_width.setter
+ def max_symbol_width(self, value):
+ core.BNSetDisassemblyMaximumSymbolWidth(self.handle, value)
+
+[docs] def is_option_set(self, option):
+ if isinstance(option, str):
+ option = DisassemblyOption[option]
+ return core.BNIsDisassemblySettingsOptionSet(self.handle, option)
+
+[docs] def set_option(self, option, state = True):
+ if isinstance(option, str):
+ option = DisassemblyOption[option]
+ core.BNSetDisassemblySettingsOption(self.handle, option, state)
+
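+ # Illustrative usage, not part of the module source: building a settings
+ # object for a disassembly renderer. ``set_option`` accepts an option name as
+ # a string; the specific option name used here is an assumption.
+ #
+ # >>> settings = DisassemblySettings()
+ # >>> settings.width = 100
+ # >>> settings.set_option("ShowAddress")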
+
+[docs]class RegisterInfo(object):
+[docs] def __init__(self, full_width_reg, size, offset=0, extend=ImplicitRegisterExtend.NoExtend, index=None):
+ self.full_width_reg = full_width_reg
+ self.offset = offset
+ self.size = size
+ self.extend = extend
+ self.index = index
+
+ def __repr__(self):
+ if self.extend == ImplicitRegisterExtend.ZeroExtendToFullWidth:
+ extend = ", zero extend"
+ elif self.extend == ImplicitRegisterExtend.SignExtendToFullWidth:
+ extend = ", sign extend"
+ else:
+ extend = ""
+ return "<reg: size %d, offset %d in %s%s>" % (self.size, self.offset, self.full_width_reg, extend)
+
+
+[docs]class RegisterStackInfo(object):
+[docs] def __init__(self, storage_regs, top_relative_regs, stack_top_reg, index=None):
+ self.storage_regs = storage_regs
+ self.top_relative_regs = top_relative_regs
+ self.stack_top_reg = stack_top_reg
+ self.index = index
+
+ def __repr__(self):
+ return "<reg stack: %d regs, stack top in %s>" % (len(self.storage_regs), self.stack_top_reg)
+
+
+[docs]class IntrinsicInput(object):
+[docs] def __init__(self, type, name = ""):
+ self.type = type
+ self.name = name
+
+ def __repr__(self):
+ if len(self.name) == 0:
+ return "<input: %s>" % str(self.type)
+ return "<input: %s %s>" % (str(self.type), self.name)
+
+
+[docs]class IntrinsicInfo(object):
+[docs] def __init__(self, inputs, outputs, index=None):
+ self.inputs = inputs
+ self.outputs = outputs
+ self.index = index
+
+ def __repr__(self):
+ return "<intrinsic: %s -> %s>" % (repr(self.inputs), repr(self.outputs))
+
+
+[docs]class InstructionBranch(object):
+[docs] def __init__(self, branch_type, target = 0, arch = None):
+ self.type = branch_type
+ self.target = target
+ self.arch = arch
+
+ def __repr__(self):
+ branch_type = self.type
+ if self.arch is not None:
+ return "<%s: %s@%#x>" % (branch_type.name, self.arch.name, self.target)
+ return "<%s: %#x>" % (branch_type, self.target)
+
+
+[docs]class InstructionInfo(object):
+[docs] def __init__(self):
+ self.length = 0
+ self.arch_transition_by_target_addr = False
+ self.branch_delay = False
+ self.branches = []
+
+[docs] def add_branch(self, branch_type, target = 0, arch = None):
+ self.branches.append(InstructionBranch(branch_type, target, arch))
+
+ def __repr__(self):
+ branch_delay = ""
+ if self.branch_delay:
+ branch_delay = ", delay slot"
+ return "<instr: %d bytes%s, %s>" % (self.length, branch_delay, repr(self.branches))
+
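+ # Illustrative sketch, not part of the module source: how an Architecture
+ # plugin's get_instruction_info implementation might populate InstructionInfo
+ # for a 4-byte unconditional branch (BranchType comes from binaryninja.enums;
+ # the target address is made up).
+ #
+ # >>> info = InstructionInfo()
+ # >>> info.length = 4
+ # >>> info.add_branch(BranchType.UnconditionalBranch, 0x1000)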
+
+[docs]class InstructionTextToken(object):
+ """
+ ``class InstructionTextToken`` is used to tell the core about the various components in the disassembly views.
+
+ ========================== ============================================
+ InstructionTextTokenType Description
+ ========================== ============================================
+ TextToken Text that doesn't fit into the other tokens
+ InstructionToken The instruction mnemonic
+ OperandSeparatorToken The comma or whatever else separates tokens
+ RegisterToken Registers
+ IntegerToken Integers
+ PossibleAddressToken Integers that are likely addresses
+ BeginMemoryOperandToken The start of a memory operand
+ EndMemoryOperandToken The end of a memory operand
+ FloatingPointToken Floating point number
+ AnnotationToken **For internal use only**
+ CodeRelativeAddressToken **For internal use only**
+ StackVariableTypeToken **For internal use only**
+ DataVariableTypeToken **For internal use only**
+ FunctionReturnTypeToken **For internal use only**
+ FunctionAttributeToken **For internal use only**
+ ArgumentTypeToken **For internal use only**
+ ArgumentNameToken **For internal use only**
+ HexDumpByteValueToken **For internal use only**
+ HexDumpSkippedByteToken **For internal use only**
+ HexDumpInvalidByteToken **For internal use only**
+ HexDumpTextToken **For internal use only**
+ OpcodeToken **For internal use only**
+ StringToken **For internal use only**
+ CharacterConstantToken **For internal use only**
+ CodeSymbolToken **For internal use only**
+ DataSymbolToken **For internal use only**
+ StackVariableToken **For internal use only**
+ ImportToken **For internal use only**
+ AddressDisplayToken **For internal use only**
+ ========================== ============================================
+
+ """
+[docs] def __init__(self, token_type, text, value = 0, size = 0, operand = 0xffffffff,
+ context = InstructionTextTokenContext.NoTokenContext, address = 0, confidence = types.max_confidence, typeNames=[]):
+ self.type = InstructionTextTokenType(token_type)
+ self.text = text
+ self.value = value
+ self.size = size
+ self.operand = operand
+ self.context = InstructionTextTokenContext(context)
+ self.confidence = confidence
+ self.address = address
+ self.typeNames = typeNames
+
+[docs] @classmethod
+ def get_instruction_lines(cls, tokens, count=0):
+ """ Helper method for converting between core.BNInstructionTextToken and InstructionTextToken lists """
+ if isinstance(tokens, list):
+ result = (core.BNInstructionTextToken * len(tokens))()
+ for j in range(len(tokens)):
+ result[j].type = tokens[j].type
+ result[j].text = tokens[j].text
+ result[j].value = tokens[j].value
+ result[j].size = tokens[j].size
+ result[j].operand = tokens[j].operand
+ result[j].context = tokens[j].context
+ result[j].confidence = tokens[j].confidence
+ result[j].address = tokens[j].address
+ result[j].namesCount = len(tokens[j].typeNames)
+ result[j].typeNames = (ctypes.c_char_p * len(tokens[j].typeNames))()
+ for i in range(len(tokens[j].typeNames)):
+ result[j].typeNames[i] = binaryninja.cstr(tokens[j].typeNames[i])
+ return result
+
+ result = []
+ for j in range(count):
+ token_type = InstructionTextTokenType(tokens[j].type)
+ text = tokens[j].text
+ if not isinstance(text, str):
+ text = text.decode("charmap")
+ value = tokens[j].value
+ size = tokens[j].size
+ operand = tokens[j].operand
+ context = tokens[j].context
+ confidence = tokens[j].confidence
+ address = tokens[j].address
+ typeNames = []
+ for i in range(tokens[j].namesCount):
+ if not isinstance(tokens[j].typeNames[i], str):
+ typeNames.append(tokens[j].typeNames[i].decode("charmap"))
+ else:
+ typeNames.append(tokens[j].typeNames[i])
+ result.append(InstructionTextToken(token_type, text, value, size, operand, context, address, confidence, typeNames))
+ return result
+
+ def __str__(self):
+ return self.text
+
+ def __repr__(self):
+ return repr(self.text)
+
+
+[docs]class DisassemblyTextRenderer(object):
+[docs] def __init__(self, func = None, settings = None, handle = None):
+ if handle is None:
+ if func is None:
+ raise ValueError("function required for disassembly")
+ settings_obj = None
+ if settings is not None:
+ settings_obj = settings.handle
+ if isinstance(func, Function):
+ self.handle = core.BNCreateDisassemblyTextRenderer(func.handle, settings_obj)
+ elif isinstance(func, binaryninja.lowlevelil.LowLevelILFunction):
+ self.handle = core.BNCreateLowLevelILDisassemblyTextRenderer(func.handle, settings_obj)
+ elif isinstance(func, binaryninja.mediumlevelil.MediumLevelILFunction):
+ self.handle = core.BNCreateMediumLevelILDisassemblyTextRenderer(func.handle, settings_obj)
+ else:
+ raise TypeError("invalid function object")
+ else:
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeDisassemblyTextRenderer(self.handle)
+
+ @property
+ def function(self):
+ return Function(handle = core.BNGetDisassemblyTextRendererFunction(self.handle))
+
+ @property
+ def il_function(self):
+ llil = core.BNGetDisassemblyTextRendererLowLevelILFunction(self.handle)
+ if llil:
+ return binaryninja.lowlevelil.LowLevelILFunction(handle = llil)
+ mlil = core.BNGetDisassemblyTextRendererMediumLevelILFunction(self.handle)
+ if mlil:
+ return binaryninja.mediumlevelil.MediumLevelILFunction(handle = mlil)
+ return None
+
+ @property
+ def basic_block(self):
+ result = core.BNGetDisassemblyTextRendererBasicBlock(self.handle)
+ if result:
+ return binaryninja.basicblock.BasicBlock(handle = result)
+ return None
+
+ @basic_block.setter
+ def basic_block(self, block):
+ if block is not None:
+ core.BNSetDisassemblyTextRendererBasicBlock(self.handle, block.handle)
+ else:
+ core.BNSetDisassemblyTextRendererBasicBlock(self.handle, None)
+
+ @property
+ def arch(self):
+ return binaryninja.architecture.CoreArchitecture(handle = core.BNGetDisassemblyTextRendererArchitecture(self.handle))
+
+ @arch.setter
+ def arch(self, arch):
+ core.BNSetDisassemblyTextRendererArchitecture(self.handle, arch.handle)
+
+ @property
+ def settings(self):
+ return DisassemblySettings(handle = core.BNGetDisassemblyTextRendererSettings(self.handle))
+
+ @settings.setter
+ def settings(self, settings):
+ if settings is not None:
+ core.BNSetDisassemblyTextRendererSettings(self.handle, settings.handle)
+ else:
+ core.BNSetDisassemblyTextRendererSettings(self.handle, None)
+
+ @property
+ def il(self):
+ return core.BNIsILDisassemblyTextRenderer(self.handle)
+
+ @property
+ def has_data_flow(self):
+ return core.BNDisassemblyTextRendererHasDataFlow(self.handle)
+
+[docs] def get_instruction_annotations(self, addr):
+ count = ctypes.c_ulonglong()
+ tokens = core.BNGetDisassemblyTextRendererInstructionAnnotations(self.handle, addr, count)
+ result = InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result
+
+[docs] def get_instruction_text(self, addr):
+ count = ctypes.c_ulonglong()
+ length = ctypes.c_ulonglong()
+ display_addr = ctypes.c_ulonglong()
+ tokens = ctypes.POINTER(core.BNInstructionTextToken)()
+ if not core.BNGetDisassemblyTextRendererInstructionText(self.handle, addr, length, tokens, count, display_addr):
+ return None, 0, 0
+ result = InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result, length.value, display_addr.value
+
+[docs] def get_disassembly_text(self, addr):
+ count = ctypes.c_ulonglong()
+ length = ctypes.c_ulonglong()
+ length.value = 0
+ lines = ctypes.POINTER(core.BNDisassemblyTextLine)()
+ ok = core.BNGetDisassemblyTextRendererLines(self.handle, addr, length, lines, count)
+ if not ok:
+ return None, 0
+ il_function = self.il_function
+ result = []
+ for i in range(0, count.value):
+ addr = lines[i].addr
+ if (lines[i].instrIndex != 0xffffffffffffffff) and (il_function is not None):
+ il_instr = il_function[lines[i].instrIndex]
+ else:
+ il_instr = None
+ color = highlight.HighlightColor._from_core_struct(lines[i].highlight)
+ tokens = InstructionTextToken.get_instruction_lines(lines[i].tokens, lines[i].count)
+ result.append(DisassemblyTextLine(tokens, addr, il_instr, color))
+ core.BNFreeDisassemblyTextLines(lines, count.value)
+ return (result, length.value)
+
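+ # Illustrative usage, not part of the module source: rendering one basic block
+ # of the current function, advancing by the instruction length reported by
+ # ``get_disassembly_text``.
+ #
+ # >>> renderer = DisassemblyTextRenderer(current_function)
+ # >>> block = current_function.basic_blocks[0]
+ # >>> renderer.basic_block = block
+ # >>> addr = block.start
+ # >>> while addr < block.end:
+ # ...     lines, length = renderer.get_disassembly_text(addr)
+ # ...     if not lines or length == 0:
+ # ...         break
+ # ...     for line in lines:
+ # ...         print(line)
+ # ...     addr += length
+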
+[docs] def reset_deduplicated_comments(self):
+ core.BNResetDisassemblyTextRendererDeduplicatedComments(self.handle)
+
+[docs] def add_symbol_token(self, tokens, addr, size, operand = None):
+ if operand is None:
+ operand = 0xffffffff
+ count = ctypes.c_ulonglong()
+ new_tokens = ctypes.POINTER(core.BNInstructionTextToken)()
+ if not core.BNGetDisassemblyTextRendererSymbolTokens(self.handle, addr, size, operand, new_tokens, count):
+ return False
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(new_tokens, count.value)
+ tokens += result
+ core.BNFreeInstructionText(new_tokens, count.value)
+ return True
+
+[docs] def add_stack_var_reference_tokens(self, tokens, ref):
+ stack_ref = core.BNStackVariableReference()
+ if ref.source_operand is None:
+ stack_ref.sourceOperand = 0xffffffff
+ else:
+ stack_ref.sourceOperand = ref.source_operand
+ if ref.type is None:
+ stack_ref.type = None
+ stack_ref.typeConfidence = 0
+ else:
+ stack_ref.type = ref.type.handle
+ stack_ref.typeConfidence = ref.type.confidence
+ stack_ref.name = ref.name
+ stack_ref.varIdentifier = ref.var.identifier
+ stack_ref.referencedOffset = ref.referenced_offset
+ stack_ref.size = ref.size
+ count = ctypes.c_ulonglong()
+ new_tokens = core.BNGetDisassemblyTextRendererStackVariableReferenceTokens(self.handle, stack_ref, count)
+ result = InstructionTextToken.get_instruction_lines(new_tokens, count.value)
+ tokens += result
+ core.BNFreeInstructionText(new_tokens, count.value)
+
+
+
+[docs] def add_integer_token(self, tokens, int_token, addr, arch = None):
+ if arch is not None:
+ arch = arch.handle
+ in_token_obj = InstructionTextToken.get_instruction_lines([int_token])
+ count = ctypes.c_ulonglong()
+ new_tokens = core.BNGetDisassemblyTextRendererIntegerTokens(self.handle, in_token_obj, arch, addr, count)
+ result = InstructionTextToken.get_instruction_lines(new_tokens, count.value)
+ tokens += result
+ core.BNFreeInstructionText(new_tokens, count.value)
+
+[docs] def wrap_comment(self, lines, cur_line, comment, has_auto_annotations, leading_spaces = " "):
+ cur_line_obj = core.BNDisassemblyTextLine()
+ cur_line_obj.addr = cur_line.address
+ if cur_line.il_instruction is None:
+ cur_line_obj.instrIndex = 0xffffffffffffffff
+ else:
+ cur_line_obj.instrIndex = cur_line.il_instruction.instr_index
+ cur_line_obj.highlight = cur_line.highlight._get_core_struct()
+ cur_line_obj.tokens = InstructionTextToken.get_instruction_lines(cur_line.tokens)
+ cur_line_obj.count = len(cur_line.tokens)
+ count = ctypes.c_ulonglong()
+ new_lines = core.BNDisassemblyTextRendererWrapComment(self.handle, cur_line_obj, count, comment,
+ has_auto_annotations, leading_spaces)
+ il_function = self.il_function
+ for i in range(0, count.value):
+ addr = new_lines[i].addr
+ if (new_lines[i].instrIndex != 0xffffffffffffffff) and (il_function is not None):
+ il_instr = il_function[new_lines[i].instrIndex]
+ else:
+ il_instr = None
+ color = highlight.HighlightColor._from_core_struct(new_lines[i].highlight)
+ tokens = InstructionTextToken.get_instruction_lines(new_lines[i].tokens, new_lines[i].count)
+ lines.append(DisassemblyTextLine(tokens, addr, il_instr, color))
+ core.BNFreeDisassemblyTextLines(new_lines, count.value)
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja import function
+from binaryninja import filemetadata
+from binaryninja import binaryview
+from binaryninja import lowlevelil
+from binaryninja import log
+from binaryninja import mediumlevelil
+
+
+[docs]class FunctionRecognizer(object):
+
+ _instance = None
+
+[docs] def __init__(self):
+ self._cb = core.BNFunctionRecognizer()
+ self._cb.context = 0
+ self._cb.recognizeLowLevelIL = self._cb.recognizeLowLevelIL.__class__(self._recognize_low_level_il)
+ self._cb.recognizeMediumLevelIL = self._cb.recognizeMediumLevelIL.__class__(self._recognize_medium_level_il)
+
+[docs] @classmethod
+ def register_global(cls):
+ if cls._instance is None:
+ cls._instance = cls()
+ core.BNRegisterGlobalFunctionRecognizer(cls._instance._cb)
+
+[docs] @classmethod
+ def register_arch(cls, arch):
+ if cls._instance is None:
+ cls._instance = cls()
+ core.BNRegisterArchitectureFunctionRecognizer(arch.handle, cls._instance._cb)
+
+ def _recognize_low_level_il(self, ctxt, data, func, il):
+ try:
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(data))
+ view = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(data))
+ func = function.Function(view, handle = core.BNNewFunctionReference(func))
+ il = lowlevelil.LowLevelILFunction(func.arch, handle = core.BNNewLowLevelILFunctionReference(il))
+ return self.recognize_low_level_il(view, func, il)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+
+
+ def _recognize_medium_level_il(self, ctxt, data, func, il):
+ try:
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(data))
+ view = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(data))
+ func = function.Function(view, handle = core.BNNewFunctionReference(func))
+ il = mediumlevelil.MediumLevelILFunction(func.arch, handle = core.BNNewMediumLevelILFunctionReference(il))
+ return self.recognize_medium_level_il(view, func, il)
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
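+ # Illustrative sketch, not part of the module source: a recognizer subclass
+ # overriding recognize_low_level_il (the hook invoked by
+ # _recognize_low_level_il above). The matching criterion and the name format
+ # are made up for the example.
+ #
+ # >>> class ReturnOnlyRecognizer(FunctionRecognizer):
+ # ...     def recognize_low_level_il(self, data, func, il):
+ # ...         if len(il) > 0 and il[0].operation == lowlevelil.LowLevelILOperation.LLIL_RET:
+ # ...             func.name = "return_only_%x" % func.start
+ # ...             return True
+ # ...         return False
+ # >>> ReturnOnlyRecognizer.register_global()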
+
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import HighlightColorStyle, HighlightStandardColor
+
+
+[docs]class HighlightColor(object):
+[docs] def __init__(self, color = None, mix_color = None, mix = None, red = None, green = None, blue = None, alpha = 255):
+ if (red is not None) and (green is not None) and (blue is not None):
+ self.style = HighlightColorStyle.CustomHighlightColor
+ self.red = red
+ self.green = green
+ self.blue = blue
+ elif (mix_color is not None) and (mix is not None):
+ self.style = HighlightColorStyle.MixedHighlightColor
+ if color is None:
+ self.color = HighlightStandardColor.NoHighlightColor
+ else:
+ self.color = color
+ self.mix_color = mix_color
+ self.mix = mix
+ else:
+ self.style = HighlightColorStyle.StandardHighlightColor
+ if color is None:
+ self.color = HighlightStandardColor.NoHighlightColor
+ else:
+ self.color = color
+ self.alpha = alpha
+
+ def _standard_color_to_str(self, color):
+ if color == HighlightStandardColor.NoHighlightColor:
+ return "none"
+ if color == HighlightStandardColor.BlueHighlightColor:
+ return "blue"
+ if color == HighlightStandardColor.GreenHighlightColor:
+ return "green"
+ if color == HighlightStandardColor.CyanHighlightColor:
+ return "cyan"
+ if color == HighlightStandardColor.RedHighlightColor:
+ return "red"
+ if color == HighlightStandardColor.MagentaHighlightColor:
+ return "magenta"
+ if color == HighlightStandardColor.YellowHighlightColor:
+ return "yellow"
+ if color == HighlightStandardColor.OrangeHighlightColor:
+ return "orange"
+ if color == HighlightStandardColor.WhiteHighlightColor:
+ return "white"
+ if color == HighlightStandardColor.BlackHighlightColor:
+ return "black"
+ return "%d" % color
+
+ def __repr__(self):
+ if self.style == HighlightColorStyle.StandardHighlightColor:
+ if self.alpha == 255:
+ return "<color: %s>" % self._standard_color_to_str(self.color)
+ return "<color: %s, alpha %d>" % (self._standard_color_to_str(self.color), self.alpha)
+ if self.style == HighlightColorStyle.MixedHighlightColor:
+ if self.alpha == 255:
+ return "<color: mix %s to %s factor %d>" % (self._standard_color_to_str(self.color),
+ self._standard_color_to_str(self.mix_color), self.mix)
+ return "<color: mix %s to %s factor %d, alpha %d>" % (self._standard_color_to_str(self.color),
+ self._standard_color_to_str(self.mix_color), self.mix, self.alpha)
+ if self.style == HighlightColorStyle.CustomHighlightColor:
+ if self.alpha == 255:
+ return "<color: #%.2x%.2x%.2x>" % (self.red, self.green, self.blue)
+ return "<color: #%.2x%.2x%.2x, alpha %d>" % (self.red, self.green, self.blue, self.alpha)
+ return "<color>"
+
+ def _get_core_struct(self):
+ result = core.BNHighlightColor()
+ result.style = self.style
+ result.color = HighlightStandardColor.NoHighlightColor
+ result.mix_color = HighlightStandardColor.NoHighlightColor
+ result.mix = 0
+ result.r = 0
+ result.g = 0
+ result.b = 0
+ result.alpha = self.alpha
+
+ if self.style == HighlightColorStyle.StandardHighlightColor:
+ result.color = self.color
+ elif self.style == HighlightColorStyle.MixedHighlightColor:
+ result.color = self.color
+ result.mixColor = self.mix_color
+ result.mix = self.mix
+ elif self.style == HighlightColorStyle.CustomHighlightColor:
+ result.r = self.red
+ result.g = self.green
+ result.b = self.blue
+
+ return result
+
+ @staticmethod
+ def _from_core_struct(color):
+ if color.style == HighlightColorStyle.StandardHighlightColor:
+ return HighlightColor(color=color.color, alpha=color.alpha)
+ elif color.style == HighlightColorStyle.MixedHighlightColor:
+ return HighlightColor(color=color.color, mix_color=color.mixColor, mix=color.mix, alpha=color.alpha)
+ elif color.style == HighlightColorStyle.CustomHighlightColor:
+ return HighlightColor(red=color.r, green=color.g, blue=color.b, alpha=color.alpha)
+ return HighlightColor(color=HighlightStandardColor.NoHighlightColor)
+
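+
+# Example (illustrative sketch, not part of the original module): the three
+# construction styles accepted by HighlightColor. ``some_function`` and
+# ``addr`` are placeholders for a binaryninja Function object and an address.
+standard = HighlightColor(color=HighlightStandardColor.RedHighlightColor)
+mixed = HighlightColor(color=HighlightStandardColor.RedHighlightColor,
+                       mix_color=HighlightStandardColor.BlueHighlightColor, mix=128)
+custom = HighlightColor(red=0xaa, green=0x10, blue=0x40, alpha=200)
+# some_function.set_user_instr_highlight(addr, custom)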
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+import traceback
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import FormInputFieldType, MessageBoxIcon, MessageBoxButtonSet, MessageBoxButtonResult, ReportType
+from binaryninja import binaryview
+from binaryninja import log
+from binaryninja import flowgraph
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class LabelField(object):
+ """
+ ``LabelField`` adds a text label to the display.
+ """
+[docs] def __init__(self, text):
+ self.text = text
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.LabelFormField
+ value.prompt = self.text
+
+ def _fill_core_result(self, value):
+ pass
+
+ def _get_result(self, value):
+ pass
+
+
+[docs]class SeparatorField(object):
+ """
+ ``SeparatorField`` adds vertical separation to the display.
+ """
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.SeparatorFormField
+
+ def _fill_core_result(self, value):
+ pass
+
+ def _get_result(self, value):
+ pass
+
+
+[docs]class TextLineField(object):
+ """
+ ``TextLineField`` adds a prompt for a text string input. The result is stored in self.result as a string on completion.
+ """
+[docs] def __init__(self, prompt):
+ self.prompt = prompt
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.TextLineFormField
+ value.prompt = self.prompt
+
+ def _fill_core_result(self, value):
+ value.stringResult = core.BNAllocString(str(self.result))
+
+ def _get_result(self, value):
+ self.result = value.stringResult
+
+
+[docs]class MultilineTextField(object):
+ """
+ ``MultilineTextField`` adds a multi-line text string input field. The result is stored in self.result
+ as a string. This option is not supported on the command-line.
+ """
+[docs] def __init__(self, prompt):
+ self.prompt = prompt
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.MultilineTextFormField
+ value.prompt = self.prompt
+
+ def _fill_core_result(self, value):
+ value.stringResult = core.BNAllocString(str(self.result))
+
+ def _get_result(self, value):
+ self.result = value.stringResult
+
+
+[docs]class IntegerField(object):
+ """
+ ``IntegerField`` adds a prompt for an integer. The result is stored in self.result as an int.
+ """
+[docs] def __init__(self, prompt):
+ self.prompt = prompt
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.IntegerFormField
+ value.prompt = self.prompt
+
+ def _fill_core_result(self, value):
+ value.intResult = self.result
+
+ def _get_result(self, value):
+ self.result = value.intResult
+
+
+[docs]class AddressField(object):
+ """
+ ``AddressField`` prompts the user for an address. By passing the optional view and current_address parameters
+ offsets can be used instead of just an address. The result is stored as an int in self.result.
+
+ Note: This API currently functions differently on the command-line, as the view and current_address are
+ disregarded. Additionally, whereas the result defaults to hexadecimal in the UI, on the command-line a 0x
+ prefix must be specified.
+ """
+[docs] def __init__(self, prompt, view=None, current_address=0):
+ self.prompt = prompt
+ self.view = view
+ self.current_address = current_address
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.AddressFormField
+ value.prompt = self.prompt
+ value.view = None
+ if self.view is not None:
+ value.view = self.view.handle
+ value.currentAddress = self.current_address
+
+ def _fill_core_result(self, value):
+ value.addressResult = self.result
+
+ def _get_result(self, value):
+ self.result = value.addressResult
+
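+
+# Example (illustrative sketch, not part of the original module): an
+# AddressField used inside a form via get_form_input (defined later in this
+# module). ``bv`` is assumed to be an open BinaryView; passing it together
+# with a starting address lets the UI accept offsets relative to that address.
+addr_f = AddressField("Target address", view=bv, current_address=bv.entry_point)
+if get_form_input([addr_f], "Pick an address"):
+    log.log_info("chosen address: %#x" % addr_f.result)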
+
+[docs]class ChoiceField(object):
+ """
+ ``ChoiceField`` prompts the user to choose from the list of strings provided in ``choices``. Result is stored
+ in self.result as an index into the choices array.
+
+ :attr str prompt: prompt to be presented to the user
+ :attr list(str) choices: list of choices to choose from
+
+ """
+[docs] def __init__(self, prompt, choices):
+ self.prompt = prompt
+ self.choices = choices
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.ChoiceFormField
+ value.prompt = self.prompt
+ choice_buf = (ctypes.c_char_p * len(self.choices))()
+ for i in range(0, len(self.choices)):
+ choice_buf[i] = self.choices[i].encode('charmap')
+ value.choices = choice_buf
+ value.count = len(self.choices)
+
+ def _fill_core_result(self, value):
+ value.indexResult = self.result
+
+ def _get_result(self, value):
+ self.result = value.indexResult
+
+
+[docs]class OpenFileNameField(object):
+ """
+ ``OpenFileNameField`` prompts the user to specify a file name to open. Result is stored in self.result as a string.
+ """
+[docs] def __init__(self, prompt, ext=""):
+ self.prompt = prompt
+ self.ext = ext
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.OpenFileNameFormField
+ value.prompt = self.prompt
+ value.ext = self.ext
+
+ def _fill_core_result(self, value):
+ value.stringResult = core.BNAllocString(str(self.result))
+
+ def _get_result(self, value):
+ self.result = value.stringResult
+
+
+[docs]class SaveFileNameField(object):
+ """
+ ``SaveFileNameField`` prompts the user to specify a file name to save. Result is stored in self.result as a string.
+ """
+[docs] def __init__(self, prompt, ext="", default_name=""):
+ self.prompt = prompt
+ self.ext = ext
+ self.default_name = default_name
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.SaveFileNameFormField
+ value.prompt = self.prompt
+ value.ext = self.ext
+ value.defaultName = self.default_name
+
+ def _fill_core_result(self, value):
+ value.stringResult = core.BNAllocString(str(self.result))
+
+ def _get_result(self, value):
+ self.result = value.stringResult
+
+
+[docs]class DirectoryNameField(object):
+ """
+ ``DirectoryNameField`` prompts the user to specify a directory name to open. Result is stored in self.result as
+ a string.
+ """
+[docs] def __init__(self, prompt, default_name=""):
+ self.prompt = prompt
+ self.default_name = default_name
+ self.result = None
+
+ def _fill_core_struct(self, value):
+ value.type = FormInputFieldType.DirectoryNameFormField
+ value.prompt = self.prompt
+ value.defaultName = self.default_name
+
+ def _fill_core_result(self, value):
+ value.stringResult = core.BNAllocString(str(self.result))
+
+ def _get_result(self, value):
+ self.result = value.stringResult
+
+
+[docs]class InteractionHandler(object):
+ _interaction_handler = None
+
+[docs] def __init__(self):
+ self._cb = core.BNInteractionHandlerCallbacks()
+ self._cb.context = 0
+ self._cb.showPlainTextReport = self._cb.showPlainTextReport.__class__(self._show_plain_text_report)
+ self._cb.showMarkdownReport = self._cb.showMarkdownReport.__class__(self._show_markdown_report)
+ self._cb.showHTMLReport = self._cb.showHTMLReport.__class__(self._show_html_report)
+ self._cb.showGraphReport = self._cb.showGraphReport.__class__(self._show_graph_report)
+ self._cb.showReportCollection = self._cb.showReportCollection.__class__(self._show_report_collection)
+ self._cb.getTextLineInput = self._cb.getTextLineInput.__class__(self._get_text_line_input)
+ self._cb.getIntegerInput = self._cb.getIntegerInput.__class__(self._get_int_input)
+ self._cb.getAddressInput = self._cb.getAddressInput.__class__(self._get_address_input)
+ self._cb.getChoiceInput = self._cb.getChoiceInput.__class__(self._get_choice_input)
+ self._cb.getOpenFileNameInput = self._cb.getOpenFileNameInput.__class__(self._get_open_filename_input)
+ self._cb.getSaveFileNameInput = self._cb.getSaveFileNameInput.__class__(self._get_save_filename_input)
+ self._cb.getDirectoryNameInput = self._cb.getDirectoryNameInput.__class__(self._get_directory_name_input)
+ self._cb.getFormInput = self._cb.getFormInput.__class__(self._get_form_input)
+ self._cb.showMessageBox = self._cb.showMessageBox.__class__(self._show_message_box)
+
+[docs] def register(self):
+ self.__class__._interaction_handler = self
+ core.BNRegisterInteractionHandler(self._cb)
+
+ def _show_plain_text_report(self, ctxt, view, title, contents):
+ try:
+ if view:
+ view = binaryview.BinaryView(handle = core.BNNewViewReference(view))
+ else:
+ view = None
+ self.show_plain_text_report(view, title, contents)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _show_markdown_report(self, ctxt, view, title, contents, plaintext):
+ try:
+ if view:
+ view = binaryview.BinaryView(handle = core.BNNewViewReference(view))
+ else:
+ view = None
+ self.show_markdown_report(view, title, contents, plaintext)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _show_html_report(self, ctxt, view, title, contents, plaintext):
+ try:
+ if view:
+ view = binaryview.BinaryView(handle = core.BNNewViewReference(view))
+ else:
+ view = None
+ self.show_html_report(view, title, contents, plaintext)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _show_graph_report(self, ctxt, view, title, graph):
+ try:
+ if view:
+ view = binaryview.BinaryView(handle = core.BNNewViewReference(view))
+ else:
+ view = None
+ self.show_graph_report(view, title, flowgraph.CoreFlowGraph(core.BNNewFlowGraphReference(graph)))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _show_report_collection(self, ctxt, title, reports):
+ try:
+ self.show_report_collection(title, ReportCollection(core.BNNewReportCollectionReference(reports)))
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_text_line_input(self, ctxt, result, prompt, title):
+ try:
+ value = self.get_text_line_input(prompt, title)
+ if value is None:
+ return False
+ result[0] = core.BNAllocString(str(value))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_int_input(self, ctxt, result, prompt, title):
+ try:
+ value = self.get_int_input(prompt, title)
+ if value is None:
+ return False
+ result[0] = value
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_address_input(self, ctxt, result, prompt, title, view, current_address):
+ try:
+ if view:
+ view = binaryview.BinaryView(handle = core.BNNewViewReference(view))
+ else:
+ view = None
+ value = self.get_address_input(prompt, title, view, current_address)
+ if value is None:
+ return False
+ result[0] = value
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_choice_input(self, ctxt, result, prompt, title, choice_buf, count):
+ try:
+ choices = []
+ for i in range(0, count):
+ choices.append(choice_buf[i])
+ value = self.get_choice_input(prompt, title, choices)
+ if value is None:
+ return False
+ result[0] = value
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_open_filename_input(self, ctxt, result, prompt, ext):
+ try:
+ value = self.get_open_filename_input(prompt, ext)
+ if value is None:
+ return False
+ result[0] = core.BNAllocString(str(value))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_save_filename_input(self, ctxt, result, prompt, ext, default_name):
+ try:
+ value = self.get_save_filename_input(prompt, ext, default_name)
+ if value is None:
+ return False
+ result[0] = core.BNAllocString(str(value))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_directory_name_input(self, ctxt, result, prompt, default_name):
+ try:
+ value = self.get_directory_name_input(prompt, default_name)
+ if value is None:
+ return False
+ result[0] = core.BNAllocString(str(value))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _get_form_input(self, ctxt, fields, count, title):
+ try:
+ field_objs = []
+ for i in range(0, count):
+ if fields[i].type == FormInputFieldType.LabelFormField:
+ field_objs.append(LabelField(fields[i].prompt))
+ elif fields[i].type == FormInputFieldType.SeparatorFormField:
+ field_objs.append(SeparatorField())
+ elif fields[i].type == FormInputFieldType.TextLineFormField:
+ field_objs.append(TextLineField(fields[i].prompt))
+ elif fields[i].type == FormInputFieldType.MultilineTextFormField:
+ field_objs.append(MultilineTextField(fields[i].prompt))
+ elif fields[i].type == FormInputFieldType.IntegerFormField:
+ field_objs.append(IntegerField(fields[i].prompt))
+ elif fields[i].type == FormInputFieldType.AddressFormField:
+ view = None
+ if fields[i].view:
+ view = binaryview.BinaryView(handle = core.BNNewViewReference(fields[i].view))
+ field_objs.append(AddressField(fields[i].prompt, view, fields[i].currentAddress))
+ elif fields[i].type == FormInputFieldType.ChoiceFormField:
+ choices = []
+ for j in range(0, fields[i].count):
+ choices.append(fields[i].choices[j])
+ field_objs.append(ChoiceField(fields[i].prompt, choices))
+ elif fields[i].type == FormInputFieldType.OpenFileNameFormField:
+ field_objs.append(OpenFileNameField(fields[i].prompt, fields[i].ext))
+ elif fields[i].type == FormInputFieldType.SaveFileNameFormField:
+ field_objs.append(SaveFileNameField(fields[i].prompt, fields[i].ext, fields[i].defaultName))
+ elif fields[i].type == FormInputFieldType.DirectoryNameFormField:
+ field_objs.append(DirectoryNameField(fields[i].prompt, fields[i].defaultName))
+ else:
+ field_objs.append(LabelField(fields[i].prompt))
+ if not self.get_form_input(field_objs, title):
+ return False
+ for i in range(0, count):
+ field_objs[i]._fill_core_result(fields[i])
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _show_message_box(self, ctxt, title, text, buttons, icon):
+ try:
+ return self.show_message_box(title, text, buttons, icon)
+ except:
+ log.log_error(traceback.format_exc())
+
+[docs] def show_plain_text_report(self, view, title, contents):
+ pass
+
+[docs] def show_markdown_report(self, view, title, contents, plaintext):
+ self.show_html_report(view, title, markdown_to_html(contents), plaintext)
+
+[docs] def show_html_report(self, view, title, contents, plaintext):
+ if len(plaintext) != 0:
+ self.show_plain_text_report(view, title, plaintext)
+
+[docs] def show_graph_report(self, view, title, graph):
+ pass
+
+[docs] def show_report_collection(self, title, reports):
+ pass
+
+[docs] def get_text_line_input(self, prompt, title):
+ return None
+
+[docs] def get_int_input(self, prompt, title):
+ while True:
+ text = self.get_text_line_input(prompt, title)
+ if len(text) == 0:
+ return False
+ try:
+ return int(text)
+ except:
+ continue
+
+[docs] def get_address_input(self, prompt, title, view, current_address):
+ return get_int_input(prompt, title)
+
+[docs] def get_choice_input(self, prompt, title, choices):
+ return None
+
+[docs] def get_open_filename_input(self, prompt, ext):
+ return get_text_line_input(prompt, "Open File")
+
+[docs] def get_save_filename_input(self, prompt, ext, default_name):
+ return get_text_line_input(prompt, "Save File")
+
+[docs] def get_directory_name_input(self, prompt, default_name):
+ return get_text_line_input(prompt, "Select Directory")
+
+[docs] def get_form_input(self, fields, title):
+ return False
+
+[docs] def show_message_box(self, title, text, buttons, icon):
+ return MessageBoxButtonResult.CancelButton
+
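+
+# Example (illustrative sketch, not part of the original module): a headless
+# handler that answers every text prompt from a canned dictionary. The
+# dictionary contents are made up; the point is the subclass-then-register()
+# pattern shown above. Methods that are not overridden keep their defaults.
+class CannedInteractionHandler(InteractionHandler):
+    answers = {"name:": "example"}
+
+    def get_text_line_input(self, prompt, title):
+        return self.answers.get(prompt, "")
+
+# CannedInteractionHandler().register()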
+
+[docs]class PlainTextReport(object):
+[docs] def __init__(self, title, contents, view = None):
+ self.view = view
+ self.title = title
+ self.contents = contents
+
+ def __repr__(self):
+ return "<plaintext report: %s>" % self.title
+
+ def __str__(self):
+ return self.contents
+
+
+[docs]class MarkdownReport(object):
+[docs] def __init__(self, title, contents, plaintext = "", view = None):
+ self.view = view
+ self.title = title
+ self.contents = contents
+ self.plaintext = plaintext
+
+ def __repr__(self):
+ return "<markdown report: %s>" % self.title
+
+ def __str__(self):
+ return self.contents
+
+
+[docs]class HTMLReport(object):
+[docs] def __init__(self, title, contents, plaintext = "", view = None):
+ self.view = view
+ self.title = title
+ self.contents = contents
+ self.plaintext = plaintext
+
+ def __repr__(self):
+ return "<html report: %s>" % self.title
+
+ def __str__(self):
+ return self.contents
+
+
+[docs]class FlowGraphReport(object):
+[docs] def __init__(self, title, graph, view = None):
+ self.view = view
+ self.title = title
+ self.graph = graph
+
+ def __repr__(self):
+ return "<graph report: %s>" % self.title
+
+
+[docs]class ReportCollection(object):
+[docs] def __init__(self, handle = None):
+ if handle is None:
+ self.handle = core.BNCreateReportCollection()
+ else:
+ self.handle = handle
+
+ def __len__(self):
+ return core.BNGetReportCollectionCount(self.handle)
+
+ def _report_from_index(self, i):
+ report_type = core.BNGetReportType(self.handle, i)
+ title = core.BNGetReportTitle(self.handle, i)
+ view = core.BNGetReportView(self.handle, i)
+ if view:
+ view = binaryview.BinaryView(handle = view)
+ else:
+ view = None
+ if report_type == ReportType.PlainTextReportType:
+ contents = core.BNGetReportContents(self.handle, i)
+ return PlainTextReport(title, contents, view)
+ elif report_type == ReportType.MarkdownReportType:
+ contents = core.BNGetReportContents(self.handle, i)
+ plaintext = core.BNGetReportPlainText(self.handle, i)
+ return MarkdownReport(title, contents, plaintext, view)
+ elif report_type == ReportType.HTMLReportType:
+ contents = core.BNGetReportContents(self.handle, i)
+ plaintext = core.BNGetReportPlainText(self.handle, i)
+ return HTMLReport(title, contents, plaintext, view)
+ elif report_type == ReportType.FlowGraphReportType:
+ graph = flowgraph.CoreFlowGraph(core.BNGetReportFlowGraph(self.handle, i))
+ return FlowGraphReport(title, graph, view)
+ raise TypeError("invalid report type %s" % repr(report_type))
+
+ def __getitem__(self, i):
+ if isinstance(i, slice) or isinstance(i, tuple):
+ raise IndexError("expected integer report index")
+ if (i < 0) or (i >= len(self)):
+ raise IndexError("index out of range")
+ return self._report_from_index(i)
+
+ def __iter__(self):
+ count = len(self)
+ for i in range(0, count):
+ yield self._report_from_index(i)
+
+ def __repr__(self):
+ return "<reports: %s>" % repr(list(self))
+
+[docs] def append(self, report):
+ if report.view is None:
+ view = None
+ else:
+ view = report.view.handle
+ if isinstance(report, PlainTextReport):
+ core.BNAddPlainTextReportToCollection(self.handle, view, report.title, report.contents)
+ elif isinstance(report, MarkdownReport):
+ core.BNAddMarkdownReportToCollection(self.handle, view, report.title, report.contents, report.plaintext)
+ elif isinstance(report, HTMLReport):
+ core.BNAddHTMLReportToCollection(self.handle, view, report.title, report.contents, report.plaintext)
+ elif isinstance(report, FlowGraphReport):
+ core.BNAddGraphReportToCollection(self.handle, view, report.title, report.graph.handle)
+ else:
+ raise TypeError("expected report object")
+
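+
+# Example (illustrative sketch, not part of the original module): building a
+# small collection and handing it to show_report_collection (defined below).
+# The report titles and contents are made up.
+reports = ReportCollection()
+reports.append(PlainTextReport("Summary", "42 functions analyzed"))
+reports.append(MarkdownReport("Details", "## Findings\n* nothing unusual", "Findings: nothing unusual"))
+# show_report_collection("Analysis results", reports)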
+
+[docs]def markdown_to_html(contents):
+ """
+ ``markdown_to_html`` converts the provided markdown to HTML.
+
+ :param string contents: Markdown contents to convert to HTML.
+ :rtype: string
+ :Example:
+ >>> markdown_to_html("##Yay")
+ '<h2>Yay</h2>'
+ """
+ return core.BNMarkdownToHTML(contents)
+
+
+[docs]def show_plain_text_report(title, contents):
+ """
+ ``show_plain_text_report`` displays contents to the user in the UI or on the command-line.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str title: title to display in the UI pop-up.
+ :param str contents: plaintext contents to display
+ :rtype: None
+ :Example:
+ >>> show_plain_text_report("title", "contents")
+ contents
+ """
+ core.BNShowPlainTextReport(None, title, contents)
+
+
+[docs]def show_markdown_report(title, contents, plaintext=""):
+ """
+ ``show_markdown_report`` displays the markdown contents in UI applications and plaintext in command-line
+ applications. This API doesn't support hyperlinking into the BinaryView; use BinaryView.show_markdown_report
+ if hyperlinking is needed.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str contents: markdown contents to display
+ :param str plaintext: Plain text version to display (used on the command-line)
+ :rtype: None
+ :Example:
+ >>> show_markdown_report("title", "##Contents", "Plain text contents")
+ Plain text contents
+ """
+ core.BNShowMarkdownReport(None, title, contents, plaintext)
+
+
+[docs]def show_html_report(title, contents, plaintext=""):
+ """
+ ``show_html_report`` displays the HTML contents in UI applications and plaintext in command-line
+ applications. This API doesn't support hyperlinking into the BinaryView; use BinaryView.show_html_report
+ if hyperlinking is needed.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str contents: HTML contents to display
+ :param str plaintext: Plain text version to display (used on the command-line)
+ :rtype: None
+ :Example:
+ >>> show_html_report("title", "<h1>Contents</h1>", "Plain text contents")
+ Plain text contents
+ """
+ core.BNShowHTMLReport(None, title, contents, plaintext)
+
+
+[docs]def show_graph_report(title, graph):
+ """
+ ``show_graph_report`` displays a flow graph in UI applications.
+
+ Note: This API function will have no effect outside the UI.
+
+ :param FlowGraph graph: Flow graph to display
+ :rtype: None
+ """
+ func = graph.function
+ if func is None:
+ core.BNShowGraphReport(None, title, graph.handle)
+ else:
+ core.BNShowGraphReport(func.view.handle, title, graph.handle)
+
+
+[docs]def show_report_collection(title, reports):
+ """
+ ``show_report_collection`` displays multiple reports in UI applications.
+
+ Note: This API function will have no effect outside the UI.
+
+ :param ReportCollection reports: Reports to display
+ :rtype: None
+ """
+ core.BNShowReportCollection(title, reports.handle)
+
+
+[docs]def get_text_line_input(prompt, title):
+ """
+ ``get_text_line_input`` prompts the user to input a string with the given prompt and title.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str prompt: String to prompt with.
+ :param str title: Title of the window when executed in the UI.
+ :rtype: string containing the input without trailing newline character.
+ :Example:
+ >>> get_text_line_input("PROMPT>", "getinfo")
+ PROMPT> Input!
+ 'Input!'
+ """
+ value = ctypes.c_char_p()
+ if not core.BNGetTextLineInput(value, prompt, title):
+ return None
+ result = value.value
+ core.BNFreeString(ctypes.cast(value, ctypes.POINTER(ctypes.c_byte)))
+ return result
+
+
+[docs]def get_int_input(prompt, title):
+ """
+ ``get_int_input`` prompts the user to input an integer with the given prompt and title.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str prompt: String to prompt with.
+ :param str title: Title of the window when executed in the UI.
+ :rtype: integer value input by the user.
+ :Example:
+ >>> get_int_input("PROMPT>", "getinfo")
+ PROMPT> 10
+ 10
+ """
+ value = ctypes.c_longlong()
+ if not core.BNGetIntegerInput(value, prompt, title):
+ return None
+ return value.value
+
+
+[docs]def get_address_input(prompt, title):
+ """
+ ``get_address_input`` prompts the user for an address with the given prompt and title.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used.
+
+ :param str prompt: String to prompt with.
+ :param str title: Title of the window when executed in the UI.
+ :rtype: integer value input by the user.
+ :Example:
+ >>> get_address_input("PROMPT>", "getinfo")
+ PROMPT> 10
+ 10L
+ """
+ value = ctypes.c_ulonglong()
+ if not core.BNGetAddressInput(value, prompt, title, None, 0):
+ return None
+ return value.value
+
+
+[docs]def get_choice_input(prompt, title, choices):
+ """
+ ``get_choice_input`` prompts the user to select one of the provided choices.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used. The UI uses a combo box.
+
+ :param str prompt: String to prompt with.
+ :param str title: Title of the window when executed in the UI.
+ :param list choices: A list of strings for the user to choose from.
+ :rtype: integer array index of the selected option
+ :Example:
+ >>> get_choice_input("PROMPT>", "choices", ["Yes", "No", "Maybe"])
+ choices
+ 1) Yes
+ 2) No
+ 3) Maybe
+ PROMPT> 1
+ 0L
+ """
+ choice_buf = (ctypes.c_char_p * len(choices))()
+ for i in range(0, len(choices)):
+ choice_buf[i] = str(choices[i]).encode('charmap')
+ value = ctypes.c_ulonglong()
+ if not core.BNGetChoiceInput(value, prompt, title, choice_buf, len(choices)):
+ return None
+ return value.value
+
+
+[docs]def get_open_filename_input(prompt, ext=""):
+ """
+ ``get_open_filename_input`` prompts the user for a file name to open.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used. The UI uses the native window pop-up for file selection.
+
+ :param str prompt: Prompt to display.
+ :param str ext: Optional, file extension
+ :Example:
+ >>> get_open_filename_input("filename:", "exe")
+ filename: foo.exe
+ 'foo.exe'
+ """
+ value = ctypes.c_char_p()
+ if not core.BNGetOpenFileNameInput(value, prompt, ext):
+ return None
+ result = value.value
+ core.BNFreeString(ctypes.cast(value, ctypes.POINTER(ctypes.c_byte)))
+ return result
+
+
+[docs]def get_save_filename_input(prompt, ext="", default_name=""):
+ """
+ ``get_save_filename_input`` prompts the user for a file name to save as, optionally providing a file extension and
+ default_name.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line
+ a simple text prompt is used. The UI uses the native window pop-up for file selection.
+
+ :param str prompt: Prompt to display.
+ :param str ext: Optional, file extension
+ :param str default_name: Optional, default file name.
+ :Example:
+ >>> get_save_filename_input("filename:", "exe", "foo.exe")
+ filename: foo.exe
+ 'foo.exe'
+ """
+ value = ctypes.c_char_p()
+ if not core.BNGetSaveFileNameInput(value, prompt, ext, default_name):
+ return None
+ result = value.value
+ core.BNFreeString(ctypes.cast(value, ctypes.POINTER(ctypes.c_byte)))
+ return result
+
+
+[docs]def get_directory_name_input(prompt, default_name=""):
+ """
+ ``get_directory_name_input`` prompts the user for a directory name to save as, optionally providing a default_name.
+
+ Note: This API functions differently on the command-line vs the UI. In the UI a pop-up is used. On the command-line a simple text prompt is used. The UI uses the native window pop-up for file selection.
+
+ :param str prompt: Prompt to display.
+ :param str default_name: Optional, default directory name.
+ :rtype: str
+ :Example:
+ >>> get_directory_name_input("prompt")
+ prompt dirname
+ 'dirname'
+ """
+ value = ctypes.c_char_p()
+ if not core.BNGetDirectoryNameInput(value, prompt, default_name):
+ return None
+ result = value.value
+ core.BNFreeString(ctypes.cast(value, ctypes.POINTER(ctypes.c_byte)))
+ return result
+
+
+[docs]def get_form_input(fields, title):
+ """
+ ``get_form_input`` prompts the user for a set of inputs specified in ``fields`` with the given title.
+ The fields parameter is a list which can contain the following types:
+ - str - an alias for LabelField
+ - None - an alias for SeparatorField
+ - LabelField - Text output
+ - SeparatorField - Vertical spacing
+ - TextLineField - Prompt for a string value
+ - MultilineTextField - Prompt for multi-line string value
+ - IntegerField - Prompt for an integer
+ - AddressField - Prompt for an address
+ - ChoiceField - Prompt for a choice from provided options
+ - OpenFileNameField - Prompt for file to open
+ - SaveFileNameField - Prompt for file to save to
+ - DirectoryNameField - Prompt for directory name
+ This API is flexible and works both in the UI via a pop-up dialog and on the command-line.
+ :param list fields: A list containing any of the above specified classes, strings or None
+ :param str title: The title of the pop-up dialog.
+ :Example:
+
+ >>> int_f = IntegerField("Specify Integer")
+ >>> tex_f = TextLineField("Specify name")
+ >>> choice_f = ChoiceField("Options", ["Yes", "No", "Maybe"])
+ >>> get_form_input(["Get Data", None, int_f, tex_f, choice_f], "The options")
+ Get Data
+
+ Specify Integer 1337
+ Specify name Peter
+ The options
+ 1) Yes
+ 2) No
+ 3) Maybe
+ Options 1
+ >>> True
+ >>> print(tex_f.result, int_f.result, choice_f.result)
+ Peter 1337 0
+ """
+ value = (core.BNFormInputField * len(fields))()
+ for i in range(0, len(fields)):
+ if isinstance(fields[i], str):
+ LabelField(fields[i])._fill_core_struct(value[i])
+ elif fields[i] is None:
+ SeparatorField()._fill_core_struct(value[i])
+ else:
+ fields[i]._fill_core_struct(value[i])
+ if not core.BNGetFormInput(value, len(fields), title):
+ return False
+ for i in range(0, len(fields)):
+ if not (isinstance(fields[i], str) or (fields[i] is None)):
+ fields[i]._get_result(value[i])
+ core.BNFreeFormInputResults(value, len(fields))
+ return True
+
+
+[docs]def show_message_box(title, text, buttons=MessageBoxButtonSet.OKButtonSet, icon=MessageBoxIcon.InformationIcon):
+ """
+ ``show_message_box`` displays a configurable message box in the UI, or prompts on the console as appropriate.
+
+ :param str title: Text title for the message box.
+ :param str text: Text for the main body of the message box.
+ :param MessageBoxButtonSet buttons: One of :py:class:`MessageBoxButtonSet`
+ :param MessageBoxIcon icon: One of :py:class:`MessageBoxIcon`
+ :return: Which button was selected
+ :rtype: MessageBoxButtonResult
+ """
+ return core.BNShowMessageBox(title, text, buttons, icon)
+
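+
+# Example (illustrative sketch, not part of the original module): asking a
+# yes/no question and acting on the answer. The specific enum members used
+# here are assumed from the imported MessageBox* enums above.
+choice = show_message_box("Confirm", "Overwrite the existing analysis?",
+                          buttons=MessageBoxButtonSet.YesNoButtonSet, icon=MessageBoxIcon.QuestionIcon)
+if choice == MessageBoxButtonResult.YesButton:
+    log.log_info("user confirmed")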
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+[docs]class LinearDisassemblyPosition(object):
+ """
+ ``class LinearDisassemblyPosition`` is a helper object containing the position of the current Linear Disassembly.
+
+ .. note:: This object should not be instantiated directly. Rather call \
+ :py:meth:`get_linear_disassembly_position_at` which instantiates this object.
+ """
+[docs] def __init__(self, func, block, addr):
+ self.function = func
+ self.block = block
+ self.address = addr
+
+
+[docs]class LinearDisassemblyLine(object):
+[docs] def __init__(self, line_type, func, block, line_offset, contents):
+ self.type = line_type
+ self.function = func
+ self.block = block
+ self.line_offset = line_offset
+ self.contents = contents
+
+ def __str__(self):
+ return str(self.contents)
+
+ def __repr__(self):
+ return repr(self.contents)
+
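+
+# Example (illustrative sketch): iterating LinearDisassemblyLine objects for an
+# open BinaryView ``bv``, assuming the BinaryView helper referenced in the note
+# above (get_linear_disassembly_position_at) plus get_next_linear_disassembly_lines
+# and DisassemblySettings, which are assumptions about the surrounding API rather
+# than part of this module.
+from binaryninja.function import DisassemblySettings
+
+settings = DisassemblySettings()
+pos = bv.get_linear_disassembly_position_at(bv.entry_point, settings)
+for line in bv.get_next_linear_disassembly_lines(pos, settings)[:10]:
+    print(line)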
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import LogLevel
+
+
+_output_to_log = False
+
+
+
+
+
+
+
+
+[docs]def log(level, text):
+ """
+ ``log`` writes messages to the log console for the given log level.
+
+ ============ ======== =======================================================================
+ LogLevelName LogLevel Description
+ ============ ======== =======================================================================
+ DebugLog     0        Logs debugging information messages to the console.
+ InfoLog      1        Logs general information messages to the console.
+ WarningLog   2        Logs message to console with **Warning** icon.
+ ErrorLog     3        Logs message to console with **Error** icon, focusing the error console.
+ AlertLog     4        Logs message to pop up window.
+ ============ ======== =======================================================================
+
+ :param LogLevel level: Log level to use
+ :param str text: message to print
+ :rtype: None
+ """
+ core.BNLog(level, '%s', text)
+
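+
+# Example (illustrative sketch, not part of the original module): the generic
+# entry point is equivalent to the level-specific helpers defined below.
+log(LogLevel.WarningLog, "something looks off")  # same effect as log_warn("something looks off")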
+
+[docs]def log_debug(text):
+ """
+ ``log_debug`` Logs debugging information messages to the console.
+
+ :param str text: message to print
+ :rtype: None
+ :Example:
+
+ >>> log_to_stdout(LogLevel.DebugLog)
+ >>> log_debug("Hotdogs!")
+ Hotdogs!
+ """
+ core.BNLogDebug('%s', text)
+
+
+[docs]def log_info(text):
+ """
+ ``log_info`` Logs general information messages to the console.
+
+ :param str text: message to print
+ :rtype: None
+ :Example:
+
+ >>> log_info("Saucisson!")
+ Saucisson!
+ >>>
+ """
+ core.BNLogInfo('%s', text)
+
+
+[docs]def log_warn(text):
+ """
+ ``log_warn`` Logs a message to the console; if run through the GUI it logs with a **Warning** icon.
+
+ :param str text: message to print
+ :rtype: None
+ :Example:
+
+ >>> log_to_stdout(LogLevel.DebugLog)
+ >>> log_warn("Chilidogs!")
+ Chilidogs!
+ >>>
+ """
+ core.BNLogWarn('%s', text)
+
+
+[docs]def log_error(text):
+ """
+ ``log_error`` Logs a message to the console; if run through the GUI it logs with an **Error** icon, focusing the error console.
+
+ :param str text: message to print
+ :rtype: None
+ :Example:
+
+ >>> log_to_stdout(LogLevel.DebugLog)
+ >>> log_error("Spanferkel!")
+ Spanferkel!
+ >>>
+ """
+ core.BNLogError('%s', text)
+
+
+[docs]def log_alert(text):
+ """
+ ``log_alert`` Logs a message to the console, and to a pop-up window if run through the GUI.
+
+ :param str text: message to print
+ :rtype: None
+ :Example:
+
+ >>> log_to_stdout(LogLevel.DebugLog)
+ >>> log_alert("Kielbasa!")
+ Kielbasa!
+ >>>
+ """
+ core.BNLogAlert('%s', text)
+
+
+[docs]def log_to_stdout(min_level=LogLevel.InfoLog):
+ """
+ ``log_to_stdout`` redirects log output at or above the given minimum level to standard output.
+
+ :param int min_level: minimum level to log to
+ :rtype: None
+ :Example:
+
+ >>> log_debug("Hotdogs!")
+ >>> log_to_stdout(LogLevel.DebugLog)
+ >>> log_debug("Hotdogs!")
+ Hotdogs!
+ >>>
+ """
+ core.BNLogToStdout(min_level)
+
+
+[docs]def log_to_stderr(min_level):
+ """
+ ``log_to_stderr`` redirects log output at or above the given minimum level to standard error.
+
+ :param int min_level: minimum level to log to
+ :rtype: None
+ """
+ core.BNLogToStderr(min_level)
+
+
+[docs]def log_to_file(min_level, path, append = False):
+ """
+ ``log_to_file`` redirects log output at or above the given minimum level to a file named ``path``, optionally appending rather than overwriting.
+
+ :param int min_level: minimum level to log to
+ :param str path: path to log to
+ :param bool append: optional flag for specifying appending. True = append, False = overwrite.
+ :rtype: None
+ """
+ core.BNLogToFile(min_level, str(path), append)
+
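+
+# Example (illustrative sketch, not part of the original module): capture
+# warnings and errors to a file for the rest of the session, then close it.
+# The path is a placeholder.
+log_to_file(LogLevel.WarningLog, "/tmp/binja.log", append=True)
+log_warn("this also lands in /tmp/binja.log")
+close_logs()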
+
+[docs]def close_logs():
+ """
+ ``close_logs`` closes all log files.
+
+ :rtype: None
+ """
+ core.BNCloseLogs()
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+import struct
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import LowLevelILOperation, LowLevelILFlagCondition, InstructionTextTokenType
+from binaryninja import basicblock #required for LowLevelILBasicBlock
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class LowLevelILLabel(object):
+[docs] def __init__(self, handle = None):
+ if handle is None:
+ self.handle = (core.BNLowLevelILLabel * 1)()
+ core.BNLowLevelILInitLabel(self.handle)
+ else:
+ self.handle = handle
+
+
+[docs]class ILRegister(object):
+[docs] def __init__(self, arch, reg):
+ self.arch = arch
+ self.index = reg
+ self.temp = (self.index & 0x80000000) != 0
+ if self.temp:
+ self.name = "temp%d" % (self.index & 0x7fffffff)
+ else:
+ self.name = self.arch.get_reg_name(self.index)
+
+ @property
+ def info(self):
+ return self.arch.regs[self.name]
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def __eq__(self, other):
+ return self.info == other.info
+
+
+[docs]class ILRegisterStack(object):
+[docs] def __init__(self, arch, reg_stack):
+ self.arch = arch
+ self.index = reg_stack
+ self.name = self.arch.get_reg_stack_name(self.index)
+
+ @property
+ def info(self):
+ return self.arch.reg_stacks[self.name]
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def __eq__(self, other):
+ return self.info == other.info
+
+
+[docs]class ILFlag(object):
+[docs] def __init__(self, arch, flag):
+ self.arch = arch
+ self.index = flag
+ self.temp = (self.index & 0x80000000) != 0
+ if self.temp:
+ self.name = "cond:%d" % (self.index & 0x7fffffff)
+ else:
+ self.name = self.arch.get_flag_name(self.index)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+
+[docs]class ILSemanticFlagClass(object):
+[docs] def __init__(self, arch, sem_class):
+ self.arch = arch
+ self.index = sem_class
+ self.name = self.arch.get_semantic_flag_class_name(self.index)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def __eq__(self, other):
+ return self.index == other.index
+
+
+[docs]class ILSemanticFlagGroup(object):
+[docs] def __init__(self, arch, sem_group):
+ self.arch = arch
+ self.index = sem_group
+ self.name = self.arch.get_semantic_flag_group_name(self.index)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def __eq__(self, other):
+ return self.index == other.index
+
+
+[docs]class ILIntrinsic(object):
+[docs] def __init__(self, arch, intrinsic):
+ self.arch = arch
+ self.index = intrinsic
+ self.name = self.arch.get_intrinsic_name(self.index)
+ if self.name in self.arch.intrinsics:
+ self.inputs = self.arch.intrinsics[self.name].inputs
+ self.outputs = self.arch.intrinsics[self.name].outputs
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def __eq__(self, other):
+ return self.index == other.index
+
+
+[docs]class SSARegister(object):
+[docs] def __init__(self, reg, version):
+ self.reg = reg
+ self.version = version
+
+ def __repr__(self):
+ return "<ssa %s version %d>" % (repr(self.reg), self.version)
+
+
+[docs]class SSARegisterStack(object):
+[docs] def __init__(self, reg_stack, version):
+ self.reg_stack = reg_stack
+ self.version = version
+
+ def __repr__(self):
+ return "<ssa %s version %d>" % (repr(self.reg_stack), self.version)
+
+
+[docs]class SSAFlag(object):
+[docs] def __init__(self, flag, version):
+ self.flag = flag
+ self.version = version
+
+ def __repr__(self):
+ return "<ssa %s version %d>" % (repr(self.flag), self.version)
+
+
+[docs]class SSARegisterOrFlag(object):
+[docs] def __init__(self, reg_or_flag, version):
+ self.reg_or_flag = reg_or_flag
+ self.version = version
+
+ def __repr__(self):
+ return "<ssa %s version %d>" % (repr(self.reg_or_flag), self.version)
+
+
+[docs]class LowLevelILOperationAndSize(object):
+[docs] def __init__(self, operation, size):
+ self.operation = operation
+ self.size = size
+
+ def __repr__(self):
+ if self.size == 0:
+ return "<%s>" % self.operation.name
+ return "<%s %d>" % (self.operation.name, self.size)
+
+
+[docs]class LowLevelILInstruction(object):
+ """
+ ``class LowLevelILInstruction`` Low Level Intermediate Language Instructions are infinite length tree-based
+ instructions. Tree-based instructions use infix notation with the left hand operand being the destination operand.
+ Infix notation is thus more natural to read than other notations (e.g. x86 ``mov eax, 0`` vs. LLIL ``eax = 0``).
+ """
+
+ ILOperations = {
+ LowLevelILOperation.LLIL_NOP: [],
+ LowLevelILOperation.LLIL_SET_REG: [("dest", "reg"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_SPLIT: [("hi", "reg"), ("lo", "reg"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_STACK_REL: [("stack", "reg_stack"), ("dest", "expr"), ("src", "expr")],
+ LowLevelILOperation.LLIL_REG_STACK_PUSH: [("stack", "reg_stack"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_FLAG: [("dest", "flag"), ("src", "expr")],
+ LowLevelILOperation.LLIL_LOAD: [("src", "expr")],
+ LowLevelILOperation.LLIL_STORE: [("dest", "expr"), ("src", "expr")],
+ LowLevelILOperation.LLIL_PUSH: [("src", "expr")],
+ LowLevelILOperation.LLIL_POP: [],
+ LowLevelILOperation.LLIL_REG: [("src", "reg")],
+ LowLevelILOperation.LLIL_REG_SPLIT: [("hi", "reg"), ("lo", "reg")],
+ LowLevelILOperation.LLIL_REG_STACK_REL: [("stack", "reg_stack"), ("src", "expr")],
+ LowLevelILOperation.LLIL_REG_STACK_POP: [("stack", "reg_stack")],
+ LowLevelILOperation.LLIL_REG_STACK_FREE_REG: [("dest", "reg")],
+ LowLevelILOperation.LLIL_REG_STACK_FREE_REL: [("stack", "reg_stack"), ("dest", "expr")],
+ LowLevelILOperation.LLIL_CONST: [("constant", "int")],
+ LowLevelILOperation.LLIL_CONST_PTR: [("constant", "int")],
+ LowLevelILOperation.LLIL_EXTERN_PTR: [("constant", "int"), ("offset", "int")],
+ LowLevelILOperation.LLIL_FLOAT_CONST: [("constant", "float")],
+ LowLevelILOperation.LLIL_FLAG: [("src", "flag")],
+ LowLevelILOperation.LLIL_FLAG_BIT: [("src", "flag"), ("bit", "int")],
+ LowLevelILOperation.LLIL_ADD: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_ADC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ LowLevelILOperation.LLIL_SUB: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_SBB: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ LowLevelILOperation.LLIL_AND: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_OR: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_XOR: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_LSL: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_LSR: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_ASR: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_ROL: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_RLC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ LowLevelILOperation.LLIL_ROR: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_RRC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ LowLevelILOperation.LLIL_MUL: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_MULU_DP: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_MULS_DP: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_DIVU: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_DIVU_DP: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_DIVS: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_DIVS_DP: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_MODU: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_MODU_DP: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_MODS: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_MODS_DP: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_NEG: [("src", "expr")],
+ LowLevelILOperation.LLIL_NOT: [("src", "expr")],
+ LowLevelILOperation.LLIL_SX: [("src", "expr")],
+ LowLevelILOperation.LLIL_ZX: [("src", "expr")],
+ LowLevelILOperation.LLIL_LOW_PART: [("src", "expr")],
+ LowLevelILOperation.LLIL_JUMP: [("dest", "expr")],
+ LowLevelILOperation.LLIL_JUMP_TO: [("dest", "expr"), ("targets", "int_list")],
+ LowLevelILOperation.LLIL_CALL: [("dest", "expr")],
+ LowLevelILOperation.LLIL_CALL_STACK_ADJUST: [("dest", "expr"), ("stack_adjustment", "int"), ("reg_stack_adjustments", "reg_stack_adjust")],
+ LowLevelILOperation.LLIL_TAILCALL: [("dest", "expr")],
+ LowLevelILOperation.LLIL_RET: [("dest", "expr")],
+ LowLevelILOperation.LLIL_NORET: [],
+ LowLevelILOperation.LLIL_IF: [("condition", "expr"), ("true", "int"), ("false", "int")],
+ LowLevelILOperation.LLIL_GOTO: [("dest", "int")],
+ LowLevelILOperation.LLIL_FLAG_COND: [("condition", "cond"), ("semantic_class", "sem_class")],
+ LowLevelILOperation.LLIL_FLAG_GROUP: [("semantic_group", "sem_group")],
+ LowLevelILOperation.LLIL_CMP_E: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_NE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_SLT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_ULT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_SLE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_ULE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_SGE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_UGE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_SGT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_CMP_UGT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_TEST_BIT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_BOOL_TO_INT: [("src", "expr")],
+ LowLevelILOperation.LLIL_ADD_OVERFLOW: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_SYSCALL: [],
+ LowLevelILOperation.LLIL_INTRINSIC: [("output", "reg_or_flag_list"), ("intrinsic", "intrinsic"), ("param", "expr")],
+ LowLevelILOperation.LLIL_INTRINSIC_SSA: [("output", "reg_or_flag_ssa_list"), ("intrinsic", "intrinsic"), ("param", "expr")],
+ LowLevelILOperation.LLIL_BP: [],
+ LowLevelILOperation.LLIL_TRAP: [("vector", "int")],
+ LowLevelILOperation.LLIL_UNDEF: [],
+ LowLevelILOperation.LLIL_UNIMPL: [],
+ LowLevelILOperation.LLIL_UNIMPL_MEM: [("src", "expr")],
+ LowLevelILOperation.LLIL_FADD: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FSUB: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FMUL: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FDIV: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FSQRT: [("src", "expr")],
+ LowLevelILOperation.LLIL_FNEG: [("src", "expr")],
+ LowLevelILOperation.LLIL_FABS: [("src", "expr")],
+ LowLevelILOperation.LLIL_FLOAT_TO_INT: [("src", "expr")],
+ LowLevelILOperation.LLIL_INT_TO_FLOAT: [("src", "expr")],
+ LowLevelILOperation.LLIL_FLOAT_CONV: [("src", "expr")],
+ LowLevelILOperation.LLIL_ROUND_TO_INT: [("src", "expr")],
+ LowLevelILOperation.LLIL_FLOOR: [("src", "expr")],
+ LowLevelILOperation.LLIL_CEIL: [("src", "expr")],
+ LowLevelILOperation.LLIL_FTRUNC: [("src", "expr")],
+ LowLevelILOperation.LLIL_FCMP_E: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_NE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_LT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_LE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_GE: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_GT: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_O: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_FCMP_UO: [("left", "expr"), ("right", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_SSA: [("dest", "reg_ssa"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_SSA_PARTIAL: [("full_reg", "reg_ssa"), ("dest", "reg"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_SPLIT_SSA: [("hi", "expr"), ("lo", "expr"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_STACK_REL_SSA: [("stack", "expr"), ("dest", "expr"), ("top", "expr"), ("src", "expr")],
+ LowLevelILOperation.LLIL_SET_REG_STACK_ABS_SSA: [("stack", "expr"), ("dest", "reg"), ("src", "expr")],
+ LowLevelILOperation.LLIL_REG_SPLIT_DEST_SSA: [("dest", "reg_ssa")],
+ LowLevelILOperation.LLIL_REG_STACK_DEST_SSA: [("src", "reg_stack_ssa_dest_and_src")],
+ LowLevelILOperation.LLIL_REG_SSA: [("src", "reg_ssa")],
+ LowLevelILOperation.LLIL_REG_SSA_PARTIAL: [("full_reg", "reg_ssa"), ("src", "reg")],
+ LowLevelILOperation.LLIL_REG_SPLIT_SSA: [("hi", "reg_ssa"), ("lo", "reg_ssa")],
+ LowLevelILOperation.LLIL_REG_STACK_REL_SSA: [("stack", "reg_stack_ssa"), ("src", "expr"), ("top", "expr")],
+ LowLevelILOperation.LLIL_REG_STACK_ABS_SSA: [("stack", "reg_stack_ssa"), ("src", "reg")],
+ LowLevelILOperation.LLIL_REG_STACK_FREE_REL_SSA: [("stack", "expr"), ("dest", "expr"), ("top", "expr")],
+ LowLevelILOperation.LLIL_REG_STACK_FREE_ABS_SSA: [("stack", "expr"), ("dest", "reg")],
+ LowLevelILOperation.LLIL_SET_FLAG_SSA: [("dest", "flag_ssa"), ("src", "expr")],
+ LowLevelILOperation.LLIL_FLAG_SSA: [("src", "flag_ssa")],
+ LowLevelILOperation.LLIL_FLAG_BIT_SSA: [("src", "flag_ssa"), ("bit", "int")],
+ LowLevelILOperation.LLIL_CALL_SSA: [("output", "expr"), ("dest", "expr"), ("stack", "expr"), ("param", "expr")],
+ LowLevelILOperation.LLIL_SYSCALL_SSA: [("output", "expr"), ("stack", "expr"), ("param", "expr")],
+ LowLevelILOperation.LLIL_TAILCALL_SSA: [("output", "expr"), ("dest", "expr"), ("stack", "expr"), ("param", "expr")],
+ LowLevelILOperation.LLIL_CALL_OUTPUT_SSA: [("dest_memory", "int"), ("dest", "reg_ssa_list")],
+ LowLevelILOperation.LLIL_CALL_STACK_SSA: [("src", "reg_ssa"), ("src_memory", "int")],
+ LowLevelILOperation.LLIL_CALL_PARAM: [("src", "expr_list")],
+ LowLevelILOperation.LLIL_LOAD_SSA: [("src", "expr"), ("src_memory", "int")],
+ LowLevelILOperation.LLIL_STORE_SSA: [("dest", "expr"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
+ LowLevelILOperation.LLIL_REG_PHI: [("dest", "reg_ssa"), ("src", "reg_ssa_list")],
+ LowLevelILOperation.LLIL_REG_STACK_PHI: [("dest", "reg_stack_ssa"), ("src", "reg_stack_ssa_list")],
+ LowLevelILOperation.LLIL_FLAG_PHI: [("dest", "flag_ssa"), ("src", "flag_ssa_list")],
+ LowLevelILOperation.LLIL_MEM_PHI: [("dest_memory", "int"), ("src_memory", "int_list")]
+ }
+
+[docs] def __init__(self, func, expr_index, instr_index=None):
+ instr = core.BNGetLowLevelILByIndex(func.handle, expr_index)
+ self.function = func
+ self.expr_index = expr_index
+ self.instr_index = instr_index
+ self.operation = LowLevelILOperation(instr.operation)
+ self.size = instr.size
+ self.address = instr.address
+ self.source_operand = instr.sourceOperand
+ if instr.flags == 0:
+ self.flags = None
+ else:
+ self.flags = func.arch.get_flag_write_type_name(instr.flags)
+ if self.source_operand == 0xffffffff:
+ self.source_operand = None
+ operands = LowLevelILInstruction.ILOperations[instr.operation]
+ self.operands = []
+ i = 0
+ for operand in operands:
+ name, operand_type = operand
+ if operand_type == "int":
+ value = instr.operands[i]
+ value = (value & ((1 << 63) - 1)) - (value & (1 << 63))
+ elif operand_type == "float":
+ if instr.size == 4:
+ value = struct.unpack("f", struct.pack("I", instr.operands[i] & 0xffffffff))[0]
+ elif instr.size == 8:
+ value = struct.unpack("d", struct.pack("Q", instr.operands[i]))[0]
+ else:
+ value = instr.operands[i]
+ elif operand_type == "expr":
+ value = LowLevelILInstruction(func, instr.operands[i])
+ elif operand_type == "reg":
+ value = ILRegister(func.arch, instr.operands[i])
+ elif operand_type == "reg_stack":
+ value = ILRegisterStack(func.arch, instr.operands[i])
+ elif operand_type == "intrinsic":
+ value = ILIntrinsic(func.arch, instr.operands[i])
+ elif operand_type == "reg_ssa":
+ reg = ILRegister(func.arch, instr.operands[i])
+ i += 1
+ value = SSARegister(reg, instr.operands[i])
+ elif operand_type == "reg_stack_ssa":
+ reg_stack = ILRegisterStack(func.arch, instr.operands[i])
+ i += 1
+ value = SSARegisterStack(reg_stack, instr.operands[i])
+ elif operand_type == "reg_stack_ssa_dest_and_src":
+ reg_stack = ILRegisterStack(func.arch, instr.operands[i])
+ i += 1
+ value = SSARegisterStack(reg_stack, instr.operands[i])
+ i += 1
+ self.operands.append(value)
+ self.dest = value
+ value = SSARegisterStack(reg_stack, instr.operands[i])
+ elif operand_type == "flag":
+ value = ILFlag(func.arch, instr.operands[i])
+ elif operand_type == "flag_ssa":
+ flag = ILFlag(func.arch, instr.operands[i])
+ i += 1
+ value = SSAFlag(flag, instr.operands[i])
+ elif operand_type == "sem_class":
+ if instr.operands[i] == 0:
+ value = None
+ else:
+ value = ILSemanticFlagClass(func.arch, instr.operands[i])
+ elif operand_type == "sem_group":
+ value = ILSemanticFlagGroup(func.arch, instr.operands[i])
+ elif operand_type == "cond":
+ value = LowLevelILFlagCondition(instr.operands[i])
+ elif operand_type == "int_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value):
+ value.append(operand_list[j])
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "expr_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value):
+ value.append(LowLevelILInstruction(func, operand_list[j]))
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "reg_or_flag_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value):
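+ # bit 32 of each packed entry distinguishes flags (set) from registers (clear)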
+ if (operand_list[j] & (1 << 32)) != 0:
+ value.append(ILFlag(func.arch, operand_list[j] & 0xffffffff))
+ else:
+ value.append(ILRegister(func.arch, operand_list[j] & 0xffffffff))
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "reg_ssa_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value // 2):
+ reg = operand_list[j * 2]
+ reg_version = operand_list[(j * 2) + 1]
+ value.append(SSARegister(ILRegister(func.arch, reg), reg_version))
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "reg_stack_ssa_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value // 2):
+ reg_stack = operand_list[j * 2]
+ reg_version = operand_list[(j * 2) + 1]
+ value.append(SSARegisterStack(ILRegisterStack(func.arch, reg_stack), reg_version))
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "flag_ssa_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value // 2):
+ flag = operand_list[j * 2]
+ flag_version = operand_list[(j * 2) + 1]
+ value.append(SSAFlag(ILFlag(func.arch, flag), flag_version))
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "reg_or_flag_ssa_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value // 2):
+ if (operand_list[j * 2] & (1 << 32)) != 0:
+ reg_or_flag = ILFlag(func.arch, operand_list[j * 2] & 0xffffffff)
+ else:
+ reg_or_flag = ILRegister(func.arch, operand_list[j * 2] & 0xffffffff)
+ reg_version = operand_list[(j * 2) + 1]
+ value.append(SSARegisterOrFlag(reg_or_flag, reg_version))
+ core.BNLowLevelILFreeOperandList(operand_list)
+ elif operand_type == "reg_stack_adjust":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNLowLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = {}
+ for j in range(count.value // 2):
+ reg_stack = operand_list[j * 2]
+ adjust = operand_list[(j * 2) + 1]
+ if adjust & 0x80000000:
+ adjust |= ~0x80000000
+ value[func.arch.get_reg_stack_name(reg_stack)] = adjust
+ core.BNLowLevelILFreeOperandList(operand_list)
+ self.operands.append(value)
+ self.__dict__[name] = value
+ i += 1
+
+ def __str__(self):
+ tokens = self.tokens
+ if tokens is None:
+ return "invalid"
+ result = ""
+ for token in tokens:
+ result += token.text
+ return result
+
+ def __repr__(self):
+ return "<il: %s>" % str(self)
+
+ @property
+ def tokens(self):
+ """LLIL tokens (read-only)"""
+ count = ctypes.c_ulonglong()
+ tokens = ctypes.POINTER(core.BNInstructionTextToken)()
+ if (self.instr_index is not None) and (self.function.source_function is not None):
+ if not core.BNGetLowLevelILInstructionText(self.function.handle, self.function.source_function.handle,
+ self.function.arch.handle, self.instr_index, tokens, count):
+ return None
+ else:
+ if not core.BNGetLowLevelILExprText(self.function.handle, self.function.arch.handle,
+ self.expr_index, tokens, count):
+ return None
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result
+
+ @property
+ def il_basic_block(self):
+ """IL basic block object containing this expression (read-only) (only available on finalized functions)"""
+ view = None
+ if self.function.source_function is not None:
+ view = self.function.source_function.view
+ return LowLevelILBasicBlock(view, core.BNGetLowLevelILBasicBlockForInstruction(self.function.handle, self.instr_index), self.function)
+
+ @property
+ def ssa_form(self):
+ """SSA form of expression (read-only)"""
+ return LowLevelILInstruction(self.function.ssa_form,
+ core.BNGetLowLevelILSSAExprIndex(self.function.handle, self.expr_index))
+
+ @property
+ def non_ssa_form(self):
+ """Non-SSA form of expression (read-only)"""
+ return LowLevelILInstruction(self.function.non_ssa_form,
+ core.BNGetLowLevelILNonSSAExprIndex(self.function.handle, self.expr_index))
+
+ @property
+ def medium_level_il(self):
+ """Gets the medium level IL expression corresponding to this expression (may be None for eliminated instructions)"""
+ expr = self.function.get_medium_level_il_expr_index(self.expr_index)
+ if expr is None:
+ return None
+ return binaryninja.mediumlevelil.MediumLevelILInstruction(self.function.medium_level_il, expr)
+
+ @property
+ def mlil(self):
+ return self.medium_level_il
+
+ @property
+ def mapped_medium_level_il(self):
+ """Gets the mapped medium level IL expression corresponding to this expression"""
+ expr = self.function.get_mapped_medium_level_il_expr_index(self.expr_index)
+ if expr is None:
+ return None
+ return binaryninja.mediumlevelil.MediumLevelILInstruction(self.function.mapped_medium_level_il, expr)
+
+ @property
+ def mmlil(self):
+ return self.mapped_medium_level_il
+
+ @property
+ def value(self):
+ """Value of expression if constant or a known value (read-only)"""
+ value = core.BNGetLowLevelILExprValue(self.function.handle, self.expr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+ @property
+ def possible_values(self):
+ """Possible values of expression using path-sensitive static data flow analysis (read-only)"""
+ value = core.BNGetLowLevelILPossibleExprValues(self.function.handle, self.expr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+ @property
+ def prefix_operands(self):
+ """All operands in the expression tree in prefix order"""
+ result = [LowLevelILOperationAndSize(self.operation, self.size)]
+ for operand in self.operands:
+ if isinstance(operand, LowLevelILInstruction):
+ result += operand.prefix_operands
+ else:
+ result.append(operand)
+ return result
+
+ @property
+ def postfix_operands(self):
+ """All operands in the expression tree in postfix order"""
+ result = []
+ for operand in self.operands:
+ if isinstance(operand, LowLevelILInstruction):
+ result += operand.postfix_operands
+ else:
+ result.append(operand)
+ result.append(LowLevelILOperationAndSize(self.operation, self.size))
+ return result
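+
+ # Example (sketch, where ``insn`` is a LowLevelILInstruction): flatten the
+ # expression tree to check whether it contains a particular operation:
+ #
+ #     ops = [o.operation for o in insn.prefix_operands
+ #            if isinstance(o, LowLevelILOperationAndSize)]
+ #     uses_load = LowLevelILOperation.LLIL_LOAD in ops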
+
+[docs] def get_reg_value(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetLowLevelILRegisterValueAtInstruction(self.function.handle, reg, self.instr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_reg_value_after(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetLowLevelILRegisterValueAfterInstruction(self.function.handle, reg, self.instr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_possible_reg_values(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetLowLevelILPossibleRegisterValuesAtInstruction(self.function.handle, reg, self.instr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_possible_reg_values_after(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetLowLevelILPossibleRegisterValuesAfterInstruction(self.function.handle, reg, self.instr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
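+
+ # Example (sketch, 'eax' is a placeholder for a register defined by the
+ # architecture): query dataflow values around this instruction:
+ #
+ #     before = insn.get_reg_value('eax')
+ #     after = insn.get_reg_value_after('eax')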
+
+[docs] def get_flag_value(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetLowLevelILFlagValueAtInstruction(self.function.handle, flag, self.instr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_flag_value_after(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetLowLevelILFlagValueAfterInstruction(self.function.handle, flag, self.instr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_possible_flag_values(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetLowLevelILPossibleFlagValuesAtInstruction(self.function.handle, flag, self.instr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_possible_flag_values_after(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetLowLevelILPossibleFlagValuesAfterInstruction(self.function.handle, flag, self.instr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_stack_contents(self, offset, size):
+ value = core.BNGetLowLevelILStackContentsAtInstruction(self.function.handle, offset, size, self.instr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_stack_contents_after(self, offset, size):
+ value = core.BNGetLowLevelILStackContentsAfterInstruction(self.function.handle, offset, size, self.instr_index)
+ result = binaryninja.function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_possible_stack_contents(self, offset, size):
+ value = core.BNGetLowLevelILPossibleStackContentsAtInstruction(self.function.handle, offset, size, self.instr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_possible_stack_contents_after(self, offset, size):
+ value = core.BNGetLowLevelILPossibleStackContentsAfterInstruction(self.function.handle, offset, size, self.instr_index)
+ result = binaryninja.function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
+[docs]class LowLevelILExpr(object):
+ """
+ ``class LowLevelILExpr`` holds the index of an IL expression.
+
+ .. note:: This class shouldn't be instantiated directly. Rather the helper members of LowLevelILFunction should be \
+ used instead.
+ """
+
+
+
+[docs]class LowLevelILFunction(object):
+ """
+ ``class LowLevelILFunction`` contains the list of LowLevelILExpr objects that make up a function's low level IL. LowLevelILExpr
+ objects can be added to the LowLevelILFunction by calling ``append`` and passing the result of the various class
+ methods which return LowLevelILExpr objects.
+
+
+ LowLevelILFlagCondition values used as parameters in the ``flag_condition`` method.
+
+ ======================= ========== ===============================
+ LowLevelILFlagCondition Operator Description
+ ======================= ========== ===============================
+ LLFC_E == Equal
+ LLFC_NE != Not equal
+ LLFC_SLT s< Signed less than
+ LLFC_ULT u< Unsigned less than
+ LLFC_SLE s<= Signed less than or equal
+ LLFC_ULE u<= Unsigned less than or equal
+ LLFC_SGE s>= Signed greater than or equal
+ LLFC_UGE u>= Unsigned greater than or equal
+ LLFC_SGT s> Signed greater than
+ LLFC_UGT u> Unsigned greater than
+ LLFC_NEG - Negative
+ LLFC_POS + Positive
+ LLFC_O overflow Overflow
+ LLFC_NO !overflow No overflow
+ ======================= ========== ===============================
+ """
+[docs] def __init__(self, arch = None, handle = None, source_func = None):
+ self.arch = arch
+ self.source_function = source_func
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNLowLevelILFunction)
+ if self.source_function is None:
+ source_handle = core.BNGetLowLevelILOwnerFunction(self.handle)
+ if source_handle:
+ self.source_function = binaryninja.function.Function(handle = source_handle)
+ else:
+ self.source_function = None
+ if self.arch is None:
+ self.arch = self.source_function.arch
+ else:
+ if self.arch is None:
+ self.arch = self.source_function.arch
+ if self.source_function is None:
+ func_handle = None
+ else:
+ func_handle = self.source_function.handle
+ self.handle = core.BNCreateLowLevelILFunction(arch.handle, func_handle)
+
+ def __del__(self):
+ if self.handle is not None:
+ core.BNFreeLowLevelILFunction(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, LowLevelILFunction):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, LowLevelILFunction):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def current_address(self):
+ """Current IL Address (read/write)"""
+ return core.BNLowLevelILGetCurrentAddress(self.handle)
+
+ @current_address.setter
+ def current_address(self, value):
+ core.BNLowLevelILSetCurrentAddress(self.handle, self.arch.handle, value)
+
+[docs] def set_current_address(self, value, arch = None):
+ if arch is None:
+ arch = self.arch
+ core.BNLowLevelILSetCurrentAddress(self.handle, arch.handle, value)
+
+[docs] def set_current_source_block(self, block):
+ core.BNLowLevelILSetCurrentSourceBlock(self.handle, block.handle)
+
+ @property
+ def temp_reg_count(self):
+ """Number of temporary registers (read-only)"""
+ return core.BNGetLowLevelILTemporaryRegisterCount(self.handle)
+
+ @property
+ def temp_flag_count(self):
+ """Number of temporary flags (read-only)"""
+ return core.BNGetLowLevelILTemporaryFlagCount(self.handle)
+
+ @property
+ def basic_blocks(self):
+ """list of LowLevelILBasicBlock objects (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetLowLevelILBasicBlockList(self.handle, count)
+ result = []
+ view = None
+ if self.source_function is not None:
+ view = self.source_function.view
+ for i in range(0, count.value):
+ result.append(LowLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def instructions(self):
+ """A generator of llil instructions of the current llil function"""
+ for block in self.basic_blocks:
+ for i in block:
+ yield i
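+
+ # Example (sketch, assuming ``func`` is a binaryninja.function.Function
+ # obtained elsewhere): iterate every lifted LLIL instruction:
+ #
+ #     for insn in func.low_level_il.instructions:
+ #         print(insn.instr_index, insn)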
+
+ @property
+ def ssa_form(self):
+ """Low level IL in SSA form (read-only)"""
+ result = core.BNGetLowLevelILSSAForm(self.handle)
+ if not result:
+ return None
+ return LowLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def non_ssa_form(self):
+ """Low level IL in non-SSA (default) form (read-only)"""
+ result = core.BNGetLowLevelILNonSSAForm(self.handle)
+ if not result:
+ return None
+ return LowLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def medium_level_il(self):
+ """Medium level IL for this low level IL."""
+ result = core.BNGetMediumLevelILForLowLevelIL(self.handle)
+ if not result:
+ return None
+ return binaryninja.mediumlevelil.MediumLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def mlil(self):
+ return self.medium_level_il
+
+ @property
+ def mapped_medium_level_il(self):
+ """Medium level IL with mappings between low level IL and medium level IL. Unused stores are not removed.
+ Typically, this should only be used to answer queries on assembly or low level IL where the query is
+ easier to perform on medium level IL."""
+ result = core.BNGetMappedMediumLevelIL(self.handle)
+ if not result:
+ return None
+ return binaryninja.mediumlevelil.MediumLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def mmlil(self):
+ return self.mapped_medium_level_il
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __len__(self):
+ return int(core.BNGetLowLevelILInstructionCount(self.handle))
+
+ def __getitem__(self, i):
+ if isinstance(i, slice) or isinstance(i, tuple):
+ raise IndexError("expected integer instruction index")
+ if isinstance(i, LowLevelILExpr):
+ return LowLevelILInstruction(self, i.index)
+ if (i < 0) or (i >= len(self)):
+ raise IndexError("index out of range")
+ return LowLevelILInstruction(self, core.BNGetLowLevelILIndexForInstruction(self.handle, i), i)
+
+ def __setitem__(self, i, j):
+ raise IndexError("instruction modification not implemented")
+
+ def __iter__(self):
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetLowLevelILBasicBlockList(self.handle, count)
+ view = None
+ if self.source_function is not None:
+ view = self.source_function.view
+ try:
+ for i in range(0, count.value):
+ yield LowLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self)
+ finally:
+ core.BNFreeBasicBlockList(blocks, count.value)
+
+[docs] def get_instruction_start(self, addr, arch = None):
+ if arch is None:
+ arch = self.arch
+ result = core.BNLowLevelILGetInstructionStart(self.handle, arch.handle, addr)
+ if result >= core.BNGetLowLevelILInstructionCount(self.handle):
+ return None
+ return result
+
+
+
+[docs] def set_indirect_branches(self, branches):
+ branch_list = (core.BNArchitectureAndAddress * len(branches))()
+ for i in range(len(branches)):
+ branch_list[i].arch = branches[i][0].handle
+ branch_list[i].address = branches[i][1]
+ core.BNLowLevelILSetIndirectBranches(self.handle, branch_list, len(branches))
+
+[docs] def expr(self, operation, a = 0, b = 0, c = 0, d = 0, size = 0, flags = None):
+ if isinstance(operation, str):
+ operation = LowLevelILOperation[operation]
+ elif isinstance(operation, LowLevelILOperation):
+ operation = operation.value
+ if isinstance(flags, str):
+ flags = self.arch.get_flag_write_type_by_name(flags)
+ elif flags is None:
+ flags = 0
+ return LowLevelILExpr(core.BNLowLevelILAddExpr(self.handle, operation, size, flags, a, b, c, d))
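+
+ # Example (sketch): ``expr`` is the raw builder wrapped by the helper methods
+ # below; for instance ``il.const(4, 1)`` is roughly equivalent to:
+ #
+ #     il.expr(LowLevelILOperation.LLIL_CONST, 1, size=4)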
+
+[docs] def append(self, expr):
+ """
+ ``append`` adds the LowLevelILExpr ``expr`` to the current LowLevelILFunction.
+
+ :param LowLevelILExpr expr: the LowLevelILExpr to add to the current LowLevelILFunction
+ :return: number of LowLevelILExpr in the current function
+ :rtype: int
+ """
+ return core.BNLowLevelILAddInstruction(self.handle, expr.index)
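+
+ # Example (sketch): build an expression with a helper and append it as an
+ # instruction:
+ #
+ #     il.append(il.nop())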
+
+[docs] def nop(self):
+ """
+ ``nop`` returns a no-operation expression; this instruction does nothing
+
+ :return: The no operation expression
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_NOP)
+
+[docs] def set_reg(self, size, reg, value, flags = 0):
+ """
+ ``set_reg`` sets the register ``reg`` of size ``size`` to the expression ``value``
+
+ :param int size: size of the register parameter in bytes
+ :param str reg: the register name
+ :param LowLevelILExpr value: an expression to set the register to
+ :param str flags: which flags are set by this operation
+ :return: The expression ``reg = value``
+ :rtype: LowLevelILExpr
+ """
+ reg = self.arch.get_reg_index(reg)
+ return self.expr(LowLevelILOperation.LLIL_SET_REG, reg, value.index, size = size, flags = flags)
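+
+ # Example (sketch, assuming the architecture defines a 4-byte register 'eax'
+ # and a flag write type named '*'; both are placeholders):
+ #
+ #     il.append(il.set_reg(4, 'eax', il.const(4, 0), flags='*'))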
+
+[docs] def set_reg_split(self, size, hi, lo, value, flags = 0):
+ """
+ ``set_reg_split`` uses ``hi`` and ``lo`` as a single extended register setting ``hi:lo`` to the expression
+ ``value``.
+
+ :param int size: size of the register parameter in bytes
+ :param str hi: the high register name
+ :param str lo: the low register name
+ :param LowLevelILExpr value: an expression to set the split registers to
+ :param str flags: which flags are set by this operation
+ :return: The expression ``hi:lo = value``
+ :rtype: LowLevelILExpr
+ """
+ hi = self.arch.get_reg_index(hi)
+ lo = self.arch.get_reg_index(lo)
+ return self.expr(LowLevelILOperation.LLIL_SET_REG_SPLIT, hi, lo, value.index, size = size, flags = flags)
+
+[docs] def set_reg_stack_top_relative(self, size, reg_stack, entry, value, flags = 0):
+ """
+ ``set_reg_stack_top_relative`` sets the top-relative entry ``entry`` of size ``size`` in register
+ stack ``reg_stack`` to the expression ``value``
+
+ :param int size: size of the register parameter in bytes
+ :param str reg_stack: the register stack name
+ :param LowLevelILExpr entry: an expression for which stack entry to set
+ :param LowLevelILExpr value: an expression to set the entry to
+ :param str flags: which flags are set by this operation
+ :return: The expression ``reg_stack[entry] = value``
+ :rtype: LowLevelILExpr
+ """
+ reg_stack = self.arch.get_reg_stack_index(reg_stack)
+ return self.expr(LowLevelILOperation.LLIL_SET_REG_STACK_REL, reg_stack, entry.index, value.index,
+ size = size, flags = flags)
+
+[docs] def reg_stack_push(self, size, reg_stack, value, flags = 0):
+ """
+ ``reg_stack_push`` pushes the expression ``value`` of size ``size`` onto the top of the register
+ stack ``reg_stack``
+
+ :param int size: size of the register parameter in bytes
+ :param str reg_stack: the register stack name
+ :param LowLevelILExpr value: an expression to push
+ :param str flags: which flags are set by this operation
+ :return: The expression ``reg_stack.push(value)``
+ :rtype: LowLevelILExpr
+ """
+ reg_stack = self.arch.get_reg_stack_index(reg_stack)
+ return self.expr(LowLevelILOperation.LLIL_REG_STACK_PUSH, reg_stack, value.index, size = size, flags = flags)
+
+[docs] def set_flag(self, flag, value):
+ """
+ ``set_flag`` sets the flag ``flag`` to the LowLevelILExpr ``value``
+
+ :param str flag: the flag name
+ :param LowLevelILExpr value: an expression to set the flag to
+ :return: The expression ``FLAG.flag = value``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_SET_FLAG, self.arch.get_flag_by_name(flag), value.index)
+
+[docs] def load(self, size, addr):
+ """
+ ``load`` Reads ``size`` bytes from the expression ``addr``
+
+ :param int size: number of bytes to read
+ :param LowLevelILExpr addr: the expression to read memory from
+ :return: The expression ``[addr].size``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_LOAD, addr.index, size=size)
+
+[docs] def store(self, size, addr, value, flags=None):
+ """
+ ``store`` Writes ``size`` bytes to expression ``addr`` read from expression ``value``
+
+ :param int size: number of bytes to write
+ :param LowLevelILExpr addr: the expression to write to
+ :param LowLevelILExpr value: the expression to be written
+ :param str flags: which flags are set by this operation
+ :return: The expression ``[addr].size = value``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_STORE, addr.index, value.index, size=size, flags=flags)
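+
+ # Example (sketch, register names are placeholders): store the 4-byte value
+ # of 'r1' to the address held in 'r0':
+ #
+ #     il.append(il.store(4, il.reg(4, 'r0'), il.reg(4, 'r1')))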
+
+[docs] def push(self, size, value):
+ """
+ ``push`` writes ``size`` bytes from expression ``value`` to the stack, adjusting the stack by ``size``.
+
+ :param int size: number of bytes to write and adjust the stack by
+ :param LowLevelILExpr value: the expression to write
+ :return: The expression ``push(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_PUSH, value.index, size=size)
+
+[docs] def pop(self, size):
+ """
+ ``pop`` reads ``size`` bytes from the stack, adjusting the stack by ``size``.
+
+ :param int size: number of bytes to read from the stack
+ :return: The expression ``pop``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_POP, size=size)
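+
+ # Example (sketch, 'r0' is a placeholder register): push an immediate and pop
+ # it back; both operate on the architecture's stack pointer:
+ #
+ #     il.append(il.push(4, il.const(4, 0x1234)))
+ #     il.append(il.set_reg(4, 'r0', il.pop(4)))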
+
+[docs] def reg(self, size, reg):
+ """
+ ``reg`` returns a register of size ``size`` with name ``reg``
+
+ :param int size: the size of the register in bytes
+ :param str reg: the name of the register
+ :return: A register expression for the given string
+ :rtype: LowLevelILExpr
+ """
+ reg = self.arch.get_reg_index(reg)
+ return self.expr(LowLevelILOperation.LLIL_REG, reg, size=size)
+
+[docs] def reg_split(self, size, hi, lo):
+ """
+ ``reg_split`` combines registers of size ``size`` with names ``hi`` and ``lo``
+
+ :param int size: the size of the register in bytes
+ :param str hi: register holding high part of value
+ :param str lo: register holding low part of value
+ :return: The expression ``hi:lo``
+ :rtype: LowLevelILExpr
+ """
+ hi = self.arch.get_reg_index(hi)
+ lo = self.arch.get_reg_index(lo)
+ return self.expr(LowLevelILOperation.LLIL_REG_SPLIT, hi, lo, size=size)
+
+[docs] def reg_stack_top_relative(self, size, reg_stack, entry):
+ """
+ ``reg_stack_top_relative`` returns a register stack entry of size ``size`` at top-relative
+ location ``entry`` in register stack with name ``reg_stack``
+
+ :param int size: the size of the register in bytes
+ :param str reg_stack: the name of the register stack
+ :param LowLevelILExpr entry: an expression for which stack entry to fetch
+ :return: The expression ``reg_stack[entry]``
+ :rtype: LowLevelILExpr
+ """
+ reg_stack = self.arch.get_reg_stack_index(reg_stack)
+ return self.expr(LowLevelILOperation.LLIL_REG_STACK_REL, reg_stack, entry.index, size=size)
+
+[docs] def reg_stack_pop(self, size, reg_stack):
+ """
+ ``reg_stack_pop`` returns the top entry of size ``size`` in register stack with name ``reg_stack``, and
+ removes the entry from the stack
+
+ :param int size: the size of the register in bytes
+ :param str reg_stack: the name of the register stack
+ :return: The expression ``reg_stack.pop``
+ :rtype: LowLevelILExpr
+ """
+ reg_stack = self.arch.get_reg_stack_index(reg_stack)
+ return self.expr(LowLevelILOperation.LLIL_REG_STACK_POP, reg_stack, size=size)
+
+[docs] def const(self, size, value):
+ """
+ ``const`` returns an expression for the constant integer ``value`` with size ``size``
+
+ :param int size: the size of the constant in bytes
+ :param int value: integer value of the constant
+ :return: A constant expression of given value and size
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CONST, value, size=size)
+
+[docs] def const_pointer(self, size, value):
+ """
+ ``const_pointer`` returns an expression for the constant pointer ``value`` with size ``size``
+
+ :param int size: the size of the pointer in bytes
+ :param int value: address referenced by pointer
+ :return: A constant expression of given value and size
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CONST_PTR, value, size=size)
+
+[docs] def reloc_pointer(self, size, value):
+ """
+ ``reloc_pointer`` returns an expression for the constant relocated pointer ``value`` with size ``size``
+
+ :param int size: the size of the pointer in bytes
+ :param int value: address referenced by pointer
+ :return: A constant expression of given value and size
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_EXTERN_PTR, value, size=size)
+
+[docs] def float_const_raw(self, size, value):
+ """
+ ``float_const_raw`` returns an expression for the constant raw binary floating point
+ value ``value`` with size ``size``
+
+ :param int size: the size of the constant in bytes
+ :param int value: integer value for the raw binary representation of the constant
+ :return: A constant expression of given value and size
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLOAT_CONST, value, size=size)
+
+[docs] def float_const_single(self, value):
+ """
+ ``float_const_single`` returns an expression for the single precision floating point value ``value``
+
+ :param float value: float value for the constant
+ :return: A constant expression of given value and size
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLOAT_CONST, struct.unpack("I", struct.pack("f", value))[0], size=4)
+
+[docs] def float_const_double(self, value):
+ """
+ ``float_const_double`` returns an expression for the double precision floating point value ``value``
+
+ :param float value: float value for the constant
+ :return: A constant expression of given value and size
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLOAT_CONST, struct.unpack("Q", struct.pack("d", value))[0], size=8)
+
+[docs] def flag(self, reg):
+ """
+ ``flag`` returns a flag expression for the given flag name.
+
+ :param str reg: name of the flag expression to retrieve
+ :return: A flag expression of given flag name
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLAG, self.arch.get_flag_by_name(reg))
+
+[docs] def flag_bit(self, size, reg, bit):
+ """
+ ``flag_bit`` sets the flag named ``reg`` and size ``size`` to the constant integer value ``bit``
+
+ :param int size: the size of the flag
+ :param str reg: the flag name
+ :param int bit: integer value to set the bit to
+ :return: A constant expression of given value and size ``FLAG.reg = bit``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLAG_BIT, self.arch.get_flag_by_name(reg), bit, size=size)
+
+[docs] def add(self, size, a, b, flags=None):
+ """
+ ``add`` adds expression ``a`` to expression ``b`` potentially setting flags ``flags`` and returning
+ an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: flags to set
+ :return: The expression ``add.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ADD, a.index, b.index, size=size, flags=flags)
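+
+ # Example (sketch, 'r0' is a placeholder register): r0 = r0 + 4:
+ #
+ #     il.append(il.set_reg(4, 'r0',
+ #         il.add(4, il.reg(4, 'r0'), il.const(4, 4))))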
+
+[docs] def add_carry(self, size, a, b, carry, flags=None):
+ """
+ ``add_carry`` adds with carry expression ``a`` to expression ``b`` potentially setting flags ``flags`` and
+ returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param LowLevelILExpr carry: Carry flag expression
+ :param str flags: flags to set
+ :return: The expression ``adc.<size>{<flags>}(a, b, carry)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ADC, a.index, b.index, carry.index, size=size, flags=flags)
+
+[docs] def sub(self, size, a, b, flags=None):
+ """
+ ``sub`` subtracts expression ``b`` from expression ``a`` potentially setting flags ``flags`` and returning
+ an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: flags to set
+ :return: The expression ``sub.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_SUB, a.index, b.index, size=size, flags=flags)
+
+[docs] def sub_borrow(self, size, a, b, carry, flags=None):
+ """
+ ``sub_borrow`` subtracts with borrow expression ``b`` from expression ``a`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param LowLevelILExpr carry: Carry flag expression
+ :param str flags: flags to set
+ :return: The expression ``sbb.<size>{<flags>}(a, b, carry)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_SBB, a.index, b.index, carry.index, size=size, flags=flags)
+
+[docs] def and_expr(self, size, a, b, flags=None):
+ """
+ ``and_expr`` bitwise and's expression ``a`` and expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``and.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_AND, a.index, b.index, size=size, flags=flags)
+
+[docs] def or_expr(self, size, a, b, flags=None):
+ """
+ ``or_expr`` bitwise or's expression ``a`` and expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``or.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_OR, a.index, b.index, size=size, flags=flags)
+
+[docs] def xor_expr(self, size, a, b, flags=None):
+ """
+ ``xor_expr`` xor's expression ``a`` with expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``xor.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_XOR, a.index, b.index, size=size, flags=flags)
+
+[docs] def shift_left(self, size, a, b, flags=None):
+ """
+ ``shift_left`` shifts left expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``lsl.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_LSL, a.index, b.index, size=size, flags=flags)
+
+[docs] def logical_shift_right(self, size, a, b, flags=None):
+ """
+ ``logical_shift_right`` shifts logically right expression ``a`` by expression ``b`` potentially setting flags
+ ``flags`` and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``lsr.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_LSR, a.index, b.index, size=size, flags=flags)
+
+[docs] def arith_shift_right(self, size, a, b, flags=None):
+ """
+ ``arith_shift_right`` arithmetically shifts right expression ``a`` by expression ``b`` potentially setting flags
+ ``flags`` and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``asr.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ASR, a.index, b.index, size=size, flags=flags)
+
+[docs] def rotate_left(self, size, a, b, flags=None):
+ """
+ ``rotate_left`` bitwise rotates left expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``rol.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ROL, a.index, b.index, size=size, flags=flags)
+
+[docs] def rotate_left_carry(self, size, a, b, carry, flags=None):
+ """
+ ``rotate_left_carry`` bitwise rotates left with carry expression ``a`` by expression ``b`` potentially setting
+ flags ``flags`` and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param LowLevelILExpr carry: Carry flag expression
+ :param str flags: optional, flags to set
+ :return: The expression ``rlc.<size>{<flags>}(a, b, carry)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_RLC, a.index, b.index, carry.index, size=size, flags=flags)
+
+[docs] def rotate_right(self, size, a, b, flags=None):
+ """
+ ``rotate_right`` bitwise rotates right expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``ror.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ROR, a.index, b.index, size=size, flags=flags)
+
+[docs] def rotate_right_carry(self, size, a, b, carry, flags=None):
+ """
+ ``rotate_right_carry`` bitwise rotates right with carry expression ``a`` by expression ``b`` potentially setting
+ flags ``flags`` and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param LowLevelILExpr carry: Carry flag expression
+ :param str flags: optional, flags to set
+ :return: The expression ``rrc.<size>{<flags>}(a, b, carry)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_RRC, a.index, b.index, carry.index, size=size, flags=flags)
+
+[docs] def mult(self, size, a, b, flags=None):
+ """
+ ``mult`` multiplies expression ``a`` by expression ``b`` potentially setting flags ``flags`` and returning an
+ expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``mul.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MUL, a.index, b.index, size=size, flags=flags)
+
+[docs] def mult_double_prec_signed(self, size, a, b, flags=None):
+ """
+ ``mult_double_prec_signed`` multiplies signed with double precision expression ``a`` by expression ``b``
+ potentially setting flags ``flags`` and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``muls.dp.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MULS_DP, a.index, b.index, size=size, flags=flags)
+
+[docs] def mult_double_prec_unsigned(self, size, a, b, flags=None):
+ """
+ ``mult_double_prec_unsigned`` multiplies unsigned with double precision expression ``a`` by expression ``b``
+ potentially setting flags ``flags`` and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``mulu.dp.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MULU_DP, a.index, b.index, size=size, flags=flags)
+
+[docs] def div_signed(self, size, a, b, flags=None):
+ """
+ ``div_signed`` signed divide expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``divs.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_DIVS, a.index, b.index, size=size, flags=flags)
+
+[docs] def div_double_prec_signed(self, size, a, b, flags=None):
+ """
+ ``div_double_prec_signed`` signed double precision divide using expression ``a`` as a
+ single double precision register by expression ``b`` potentially setting flags ``flags`` and returning an
+ expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``divs.dp.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_DIVS_DP, a.index, b.index, size=size, flags=flags)
+
+[docs] def div_unsigned(self, size, a, b, flags=None):
+ """
+ ``div_unsigned`` unsigned divide expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``divu.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_DIVU, a.index, b.index, size=size, flags=flags)
+
+[docs] def div_double_prec_unsigned(self, size, a, b, flags=None):
+ """
+ ``div_double_prec_unsigned`` unsigned double precision divide using expression ``a`` as
+ a single double precision register by expression ``b`` potentially setting flags ``flags`` and returning an
+ expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``divu.dp.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_DIVU_DP, a.index, b.index, size=size, flags=flags)
+
+[docs] def mod_signed(self, size, a, b, flags=None):
+ """
+ ``mod_signed`` signed modulus expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``mods.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MODS, a.index, b.index, size=size, flags=flags)
+
+[docs] def mod_double_prec_signed(self, size, a, b, flags=None):
+ """
+ ``mod_double_prec_signed`` signed double precision modulus using expression ``a`` as a single
+ double precision register by expression ``b`` potentially setting flags ``flags`` and returning an expression
+ of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``mods.dp.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MODS_DP, a.index, b.index, size=size, flags=flags)
+
+[docs] def mod_unsigned(self, size, a, b, flags=None):
+ """
+ ``mod_unsigned`` unsigned modulus expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``modu.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MODU, a.index, b.index, size=size, flags=flags)
+
+[docs] def mod_double_prec_unsigned(self, size, a, b, flags=None):
+ """
+ ``mod_double_prec_unsigned`` unsigned double precision modulus using expression ``a`` as
+ a single double precision register by expression ``b`` potentially setting flags ``flags`` and returning an
+ expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: optional, flags to set
+ :return: The expression ``modu.dp.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_MODU_DP, a.index, b.index, size=size, flags=flags)
+
+[docs] def neg_expr(self, size, value, flags=None):
+ """
+ ``neg_expr`` two's complement sign negation of expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to negate
+ :param str flags: optional, flags to set
+ :return: The expression ``neg.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_NEG, value.index, size=size, flags=flags)
+
+[docs] def not_expr(self, size, value, flags=None):
+ """
+ ``not_expr`` bitwise inverse of expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to bitwise invert
+ :param str flags: optional, flags to set
+ :return: The expression ``not.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_NOT, value.index, size=size, flags=flags)
+
+[docs] def sign_extend(self, size, value, flags=None):
+ """
+ ``sign_extend`` two's complement sign-extends the expression in ``value`` to ``size`` bytes
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to sign extend
+ :param str flags: optional, flags to set
+ :return: The expression ``sx.<size>(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_SX, value.index, size=size, flags=flags)
+
+[docs] def zero_extend(self, size, value, flags=None):
+ """
+ ``zero_extend`` zero-extends the expression in ``value`` to ``size`` bytes
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to zero extend
+ :param str flags: optional, flags to set
+ :return: The expression ``zx.<size>(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ZX, value.index, size=size, flags=flags)
+
+[docs] def low_part(self, size, value, flags=None):
+ """
+ ``low_part`` truncates ``value`` to ``size`` bytes
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to truncate
+ :param str flags: optional, flags to set
+ :return: The expression ``(value).<size>``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_LOW_PART, value.index, size=size, flags=flags)
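+
+ # Example (sketch, register names are placeholders): widen a 1-byte register
+ # 'r0b' into a 4-byte register 'r0':
+ #
+ #     il.append(il.set_reg(4, 'r0', il.zero_extend(4, il.reg(1, 'r0b'))))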
+
+[docs] def jump(self, dest):
+ """
+ ``jump`` returns an expression which jumps (branches) to the expression ``dest``
+
+ :param LowLevelILExpr dest: the expression to jump to
+ :return: The expression ``jump(dest)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_JUMP, dest.index)
+
+[docs] def call(self, dest):
+ """
+ ``call`` returns an expression which first pushes the address of the next instruction onto the stack then jumps
+ (branches) to the expression ``dest``
+
+ :param LowLevelILExpr dest: the expression to call
+ :return: The expression ``call(dest)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CALL, dest.index)
+
+[docs] def call_stack_adjust(self, dest, stack_adjust):
+ """
+ ``call_stack_adjust`` returns an expression which first pushes the address of the next instruction onto the stack
+ then jumps (branches) to the expression ``dest``. After the function exits, ``stack_adjust`` is added to the
+ stack pointer register.
+
+ :param LowLevelILExpr dest: the expression to call
+ :param int stack_adjust: number of bytes added to the stack pointer after the call returns
+ :return: The expression ``call(dest), stack += stack_adjust``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CALL_STACK_ADJUST, dest.index, stack_adjust)
+
+[docs] def tailcall(self, dest):
+ """
+ ``tailcall`` returns an expression which jumps (branches) to the expression ``dest``
+
+ :param LowLevelILExpr dest: the expression to jump to
+ :return: The expression ``tailcall(dest)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_TAILCALL, dest.index)
+
+[docs] def ret(self, dest):
+ """
+ ``ret`` returns an expression which jumps (branches) to the expression ``dest``. ``ret`` is a special alias for
+ jump that makes the disassembler stop disassembling.
+
+ :param LowLevelILExpr dest: the expression to jump to
+ :return: The expression ``jump(dest)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_RET, dest.index)
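+
+ # Example (sketch): call a fixed address, then model a conventional return
+ # that pops the return address from the stack:
+ #
+ #     il.append(il.call(il.const_pointer(4, 0x8000)))
+ #     il.append(il.ret(il.pop(4)))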
+
+[docs] def no_ret(self):
+ """
+ ``no_ret`` returns an expression that halts disassembly
+
+ :return: The expression ``noreturn``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_NORET)
+
+[docs] def flag_condition(self, cond, sem_class = None):
+ """
+ ``flag_condition`` returns a flag_condition expression for the given LowLevelILFlagCondition
+
+ :param LowLevelILFlagCondition cond: Flag condition expression to retrieve
+ :param str sem_class: Optional semantic flag class
+ :return: A flag_condition expression
+ :rtype: LowLevelILExpr
+ """
+ if isinstance(cond, str):
+ cond = LowLevelILFlagCondition[cond]
+ elif isinstance(cond, LowLevelILFlagCondition):
+ cond = cond.value
+ class_index = self.arch.get_semantic_flag_class_index(sem_class)
+ return self.expr(LowLevelILOperation.LLIL_FLAG_COND, cond, class_index)
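+
+ # Example (sketch): build the "equal" condition expression; this would
+ # typically feed a conditional branch helper (e.g. ``if_expr``):
+ #
+ #     cond = il.flag_condition(LowLevelILFlagCondition.LLFC_E)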
+
+[docs] def flag_group(self, sem_group):
+ """
+ ``flag_group`` returns a flag_group expression for the given semantic flag group
+
+ :param str sem_group: Semantic flag group to access
+ :return: A flag_group expression
+ :rtype: LowLevelILExpr
+ """
+ group = self.arch.get_semantic_flag_group_index(sem_group)
+ return self.expr(LowLevelILOperation.LLIL_FLAG_GROUP, group)
+
+[docs] def compare_equal(self, size, a, b):
+ """
+ ``compare_equal`` returns comparison expression of size ``size`` checking if expression ``a`` is equal to
+ expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_E, a.index, b.index, size = size)
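+
+ # Example (sketch, 'r0' is a placeholder register): compare a register
+ # against zero:
+ #
+ #     cond = il.compare_equal(4, il.reg(4, 'r0'), il.const(4, 0))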
+
+[docs] def compare_not_equal(self, size, a, b):
+ """
+ ``compare_not_equal`` returns comparison expression of size ``size`` checking if expression ``a`` is not equal to
+ expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_NE, a.index, b.index, size = size)
+
+[docs] def compare_signed_less_than(self, size, a, b):
+ """
+ ``compare_signed_less_than`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ signed less than expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_SLT, a.index, b.index, size = size)
+
+[docs] def compare_unsigned_less_than(self, size, a, b):
+ """
+ ``compare_unsigned_less_than`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ unsigned less than expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_ULT, a.index, b.index, size = size)
+
+[docs] def compare_signed_less_equal(self, size, a, b):
+ """
+ ``compare_signed_less_equal`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ signed less than or equal to expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_SLE, a.index, b.index, size = size)
+
+[docs] def compare_unsigned_less_equal(self, size, a, b):
+ """
+ ``compare_unsigned_less_equal`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ unsigned less than or equal to expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_ULE, a.index, b.index, size = size)
+
+[docs] def compare_signed_greater_equal(self, size, a, b):
+ """
+ ``compare_signed_greater_equal`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ signed greater than or equal to expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_SGE, a.index, b.index, size = size)
+
+[docs] def compare_unsigned_greater_equal(self, size, a, b):
+ """
+ ``compare_unsigned_greater_equal`` returns comparison expression of size ``size`` checking if expression ``a``
+ is unsigned greater than or equal to expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_UGE, a.index, b.index, size = size)
+
+[docs] def compare_signed_greater_than(self, size, a, b):
+ """
+ ``compare_signed_greater_than`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ signed greater than expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_SGT, a.index, b.index, size = size)
+
+[docs] def compare_unsigned_greater_than(self, size, a, b):
+ """
+ ``compare_unsigned_greater_than`` returns comparison expression of size ``size`` checking if expression ``a`` is
+ unsigned greater than expression ``b``
+
+ :param int size: size in bytes
+ :param LowLevelILExpr a: LHS of comparison
+ :param LowLevelILExpr b: RHS of comparison
+ :return: a comparison expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CMP_UGT, a.index, b.index, size = size)
+
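+# A minimal usage sketch of the comparison helpers above, as they would be called from an
+# architecture plugin's lifter. The 4-byte size and the register name "eax" are illustrative
+# assumptions; the resulting expression would normally feed ``if_expr`` (defined later in
+# this class) or a flag write rather than stand alone.
+def build_signed_lt(il, imm):
+    # build the expression ``eax s< imm``, compared as 4-byte values
+    return il.compare_signed_less_than(4, il.reg(4, "eax"), il.const(4, imm))
+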
+[docs] def test_bit(self, size, a, b):
+ """``test_bit`` returns a bit test expression of size ``size`` comparing expressions ``a`` and ``b``"""
+ return self.expr(LowLevelILOperation.LLIL_TEST_BIT, a.index, b.index, size = size)
+
+[docs] def system_call(self):
+ """
+ ``system_call`` returns a system call expression.
+
+ :return: a system call expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_SYSCALL)
+
+[docs] def intrinsic(self, outputs, intrinsic, params, flags=None):
+ """
+ ``intrinsic`` returns an intrinsic expression.
+
+ :return: an intrinsic expression.
+ :rtype: LowLevelILExpr
+ """
+ output_list = []
+ for output in outputs:
+ if isinstance(output, ILFlag):
+ output_list.append((1 << 32) | output.index)
+ else:
+ output_list.append(output.index)
+ param_list = []
+ for param in params:
+ param_list.append(param.index)
+ call_param = self.expr(LowLevelILOperation.LLIL_CALL_PARAM, len(params), self.add_operand_list(param_list).index)
+ return self.expr(LowLevelILOperation.LLIL_INTRINSIC, len(outputs), self.add_operand_list(output_list).index,
+ self.arch.get_intrinsic_index(intrinsic), call_param.index, flags = flags)
+
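+# A hedged sketch of driving ``intrinsic`` above from a lifter. The intrinsic name
+# "vendor_op" and register name "r0" are illustrative placeholders and must match what the
+# Architecture actually declares; the outputs list may also contain ILRegister/ILFlag
+# objects from this module when the intrinsic writes registers or flags.
+def lift_vendor_op(il):
+    params = [il.reg(4, "r0"), il.const(4, 1)]        # input expressions
+    il.append(il.intrinsic([], "vendor_op", params))  # no outputs modeled in this sketch
+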
+[docs] def breakpoint(self):
+ """
+ ``breakpoint`` returns a processor breakpoint expression.
+
+ :return: a breakpoint expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_BP)
+
+[docs] def trap(self, value):
+ """
+ ``trap`` returns a processor trap (interrupt) expression of the given integer ``value``.
+
+ :param int value: trap (interrupt) number
+ :return: a trap expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_TRAP, value)
+
+[docs] def undefined(self):
+ """
+ ``undefined`` returns the undefined expression. This should be used for instructions which perform functions but
+ aren't important for dataflow or partial emulation purposes.
+
+ :return: the undefined expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_UNDEF)
+
+[docs] def unimplemented(self):
+ """
+ ``unimplemented`` returns the unimplemented expression. This should be used for all instructions which aren't
+ implemented.
+
+ :return: the unimplemented expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_UNIMPL)
+
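+# Sketch: a lifter commonly falls back to the expressions above when it cannot (or does not
+# want to) model an instruction. The opcode values below are illustrative assumptions.
+def lift_misc(il, opcode):
+    if opcode == 0xCC:        # e.g. an x86-style software breakpoint
+        il.append(il.breakpoint())
+    elif opcode == 0xF4:      # an instruction with no effect worth modeling for dataflow
+        il.append(il.undefined())
+    else:                     # anything the plugin has not implemented yet
+        il.append(il.unimplemented())
+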
+[docs] def unimplemented_memory_ref(self, size, addr):
+ """
+ ``unimplemented_memory_ref`` returns a memory reference to expression ``addr`` of size ``size`` with an unimplemented operation.
+
+ :param int size: size in bytes of the memory reference
+ :param LowLevelILExpr addr: expression to reference memory
+ :return: the unimplemented memory reference expression.
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_UNIMPL_MEM, addr.index, size = size)
+
+[docs] def float_add(self, size, a, b, flags=None):
+ """
+ ``float_add`` adds floating point expression ``a`` to expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: flags to set
+ :return: The expression ``fadd.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FADD, a.index, b.index, size=size, flags=flags)
+
+[docs] def float_sub(self, size, a, b, flags=None):
+ """
+ ``float_sub`` subtracts floating point expression ``b`` from expression ``a`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: flags to set
+ :return: The expression ``fsub.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FSUB, a.index, b.index, size=size, flags=flags)
+
+[docs] def float_mult(self, size, a, b, flags=None):
+ """
+ ``float_mult`` multiplies floating point expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: flags to set
+ :return: The expression ``fmul.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FMUL, a.index, b.index, size=size, flags=flags)
+
+[docs] def float_div(self, size, a, b, flags=None):
+ """
+ ``float_div`` divides floating point expression ``a`` by expression ``b`` potentially setting flags ``flags``
+ and returning an expression of ``size`` bytes.
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :param str flags: flags to set
+ :return: The expression ``fdiv.<size>{<flags>}(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FDIV, a.index, b.index, size=size, flags=flags)
+
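+# Sketch: lifting a hypothetical 8-byte floating point add of two registers using the
+# helpers above. The register names ("f0", "f1") are assumptions for illustration only.
+def lift_fadd(il):
+    # fadd.8(f0, f1) written back into f0; no flags are set in this sketch
+    result = il.float_add(8, il.reg(8, "f0"), il.reg(8, "f1"))
+    il.append(il.set_reg(8, "f0", result))
+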
+[docs] def float_sqrt(self, size, value, flags=None):
+ """
+ ``float_sqrt`` returns square root of floating point expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to compute the square root of
+ :param str flags: optional, flags to set
+ :return: The expression ``sqrt.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FSQRT, value.index, size=size, flags=flags)
+
+[docs] def float_neg(self, size, value, flags=None):
+ """
+ ``float_neg`` returns sign negation of floating point expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to negate
+ :param str flags: optional, flags to set
+ :return: The expression ``fneg.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FNEG, value.index, size=size, flags=flags)
+
+[docs] def float_abs(self, size, value, flags=None):
+ """
+ ``float_abs`` returns absolute value of floating point expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to take the absolute value of
+ :param str flags: optional, flags to set
+ :return: The expression ``fabs.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FABS, value.index, size=size, flags=flags)
+
+[docs] def float_to_int(self, size, value, flags=None):
+ """
+ ``float_to_int`` returns integer value of floating point expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to convert
+ :param str flags: optional, flags to set
+ :return: The expression ``int.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLOAT_TO_INT, value.index, size=size, flags=flags)
+
+[docs] def int_to_float(self, size, value, flags=None):
+ """
+ ``int_to_float`` returns floating point value of integer expression ``value`` of size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to convert
+ :param str flags: optional, flags to set
+ :return: The expression ``float.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_INT_TO_FLOAT, value.index, size=size, flags=flags)
+
+[docs] def float_convert(self, size, value, flags=None):
+ """
+ ``float_convert`` converts the floating point value of expression ``value`` to size ``size`` potentially setting flags
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to convert
+ :param str flags: optional, flags to set
+ :return: The expression ``fconvert.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLOAT_CONV, value.index, size=size, flags=flags)
+
+[docs] def round_to_int(self, size, value, flags=None):
+ """
+ ``round_to_int`` rounds a floating point value to the nearest integer
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to round
+ :param str flags: optional, flags to set
+ :return: The expression ``roundint.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_ROUND_TO_INT, value.index, size=size, flags=flags)
+
+[docs] def floor(self, size, value, flags=None):
+ """
+ ``floor`` rounds a floating point value to an integer towards negative infinity
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to round down
+ :param str flags: optional, flags to set
+ :return: The expression ``floor.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FLOOR, value.index, size=size, flags=flags)
+
+[docs] def ceil(self, size, value, flags=None):
+ """
+ ``ceil`` rounds a floating point value to an integer towards positive infinity
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to round up
+ :param str flags: optional, flags to set
+ :return: The expression ``ceil.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_CEIL, value.index, size=size, flags=flags)
+
+[docs] def float_trunc(self, size, value, flags=None):
+ """
+ ``float_trunc`` rounds a floating point value to an integer towards zero
+
+ :param int size: the size of the result in bytes
+ :param LowLevelILExpr value: the expression to truncate
+ :param str flags: optional, flags to set
+ :return: The expression ``ftrunc.<size>{<flags>}(value)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FTRUNC, value.index, size=size, flags=flags)
+
+[docs] def float_compare_equal(self, size, a, b):
+ """
+ ``float_compare_equal`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is equal to expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``a f== b``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_E, a.index, b.index)
+
+[docs] def float_compare_not_equal(self, size, a, b):
+ """
+ ``float_compare_not_equal`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is not equal to expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``a f!= b``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_NE, a.index, b.index)
+
+[docs] def float_compare_less_than(self, size, a, b):
+ """
+ ``float_compare_less_than`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is less than expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``a f< b``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_LT, a.index, b.index)
+
+[docs] def float_compare_less_equal(self, size, a, b):
+ """
+ ``float_compare_less_equal`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is less than or equal to expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``a f<= b``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_LE, a.index, b.index)
+
+[docs] def float_compare_greater_equal(self, size, a, b):
+ """
+ ``float_compare_greater_equal`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is greater than or equal to expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``a f>= b``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_GE, a.index, b.index)
+
+[docs] def float_compare_greater_than(self, size, a, b):
+ """
+ ``float_compare_greater_than`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is greater than expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``a f> b``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_GT, a.index, b.index)
+
+[docs] def float_compare_unordered(self, size, a, b):
+ """
+ ``float_compare_unordered`` returns floating point comparison expression of size ``size`` checking if
+ expression ``a`` is unordered relative to expression ``b``
+
+ :param int size: the size of the operands in bytes
+ :param LowLevelILExpr a: LHS expression
+ :param LowLevelILExpr b: RHS expression
+ :return: The expression ``is_unordered(a, b)``
+ :rtype: LowLevelILExpr
+ """
+ return self.expr(LowLevelILOperation.LLIL_FCMP_UO, a.index, b.index)
+
+[docs] def goto(self, label):
+ """
+ ``goto`` returns a goto expression which jumps to the provided LowLevelILLabel.
+
+ :param LowLevelILLabel label: Label to jump to
+ :return: the LowLevelILExpr that jumps to the provided label
+ :rtype: LowLevelILExpr
+ """
+ return LowLevelILExpr(core.BNLowLevelILGoto(self.handle, label.handle))
+
+[docs] def if_expr(self, operand, t, f):
+ """
+ ``if_expr`` returns the ``if`` expression which depending on condition ``operand`` jumps to the LowLevelILLabel
+ ``t`` when the condition expression ``operand`` is non-zero and ``f`` when it's zero.
+
+ :param LowLevelILExpr operand: comparison expression to evaluate.
+ :param LowLevelILLabel t: Label for the true branch
+ :param LowLevelILLabel f: Label for the false branch
+ :return: the LowLevelILExpr for the if expression
+ :rtype: LowLevelILExpr
+ """
+ return LowLevelILExpr(core.BNLowLevelILIf(self.handle, operand.index, t.handle, f.handle))
+
+[docs] def mark_label(self, label):
+ """
+ ``mark_label`` assigns a LowLevelILLabel to the current IL address.
+
+ :param LowLevelILLabel label:
+ :rtype: None
+ """
+ core.BNLowLevelILMarkLabel(self.handle, label.handle)
+
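+# Sketch: the usual pattern for lifting a conditional branch with ``if_expr``, ``mark_label``
+# and ``goto``. ``LowLevelILLabel`` is defined earlier in this module; the condition and the
+# register names below are illustrative assumptions.
+def lift_conditional(il):
+    t = LowLevelILLabel()
+    f = LowLevelILLabel()
+    cond = il.compare_not_equal(4, il.reg(4, "r0"), il.const(4, 0))
+    il.append(il.if_expr(cond, t, f))
+    il.mark_label(t)                                     # true branch: r1 = 1
+    il.append(il.set_reg(4, "r1", il.const(4, 1)))
+    il.append(il.goto(f))                                # explicit jump to the join point
+    il.mark_label(f)                                     # false branch / join point
+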
+[docs] def add_label_list(self, labels):
+ """
+ ``add_label_list`` returns a label list expression for the given list of LowLevelILLabel objects.
+
+ :param list(LowLevelILLabel) labels: the list of LowLevelILLabel to get a label list expression from
+ :return: the label list expression
+ :rtype: LowLevelILExpr
+ """
+ label_list = (ctypes.POINTER(core.BNLowLevelILLabel) * len(labels))()
+ for i in range(len(labels)):
+ label_list[i] = labels[i].handle
+ return LowLevelILExpr(core.BNLowLevelILAddLabelList(self.handle, label_list, len(labels)))
+
+[docs] def add_operand_list(self, operands):
+ """
+ ``add_operand_list`` returns an operand list expression for the given list of integer operands.
+
+ :param list(int) operands: list of operand numbers
+ :return: an operand list expression
+ :rtype: LowLevelILExpr
+ """
+ operand_list = (ctypes.c_ulonglong * len(operands))()
+ for i in range(len(operands)):
+ operand_list[i] = operands[i]
+ return LowLevelILExpr(core.BNLowLevelILAddOperandList(self.handle, operand_list, len(operands)))
+
+[docs] def operand(self, n, expr):
+ """
+ ``operand`` sets the operand number of the expression ``expr`` and passes back ``expr`` without modification.
+
+ :param int n:
+ :param LowLevelILExpr expr:
+ :return: returns the expression ``expr`` unmodified
+ :rtype: LowLevelILExpr
+ """
+ core.BNLowLevelILSetExprSourceOperand(self.handle, expr.index, n)
+ return expr
+
+[docs] def finalize(self):
+ """
+ ``finalize`` ends the function and computes the list of basic blocks.
+
+ :rtype: None
+ """
+ core.BNFinalizeLowLevelILFunction(self.handle)
+
+[docs] def add_label_for_address(self, arch, addr):
+ """
+ ``add_label_for_address`` adds a low-level IL label for the given architecture ``arch`` at the given virtual
+ address ``addr``
+
+ :param Architecture arch: Architecture to add labels for
+ :param int addr: the virtual address to add a label at
+ """
+ if arch is not None:
+ arch = arch.handle
+ core.BNAddLowLevelILLabelForAddress(self.handle, arch, addr)
+
+[docs] def get_label_for_address(self, arch, addr):
+ """
+ ``get_label_for_address`` returns the LowLevelILLabel for the given Architecture ``arch`` and IL address ``addr``.
+
+ :param Architecture arch:
+ :param int addr: IL Address label to retrieve
+ :return: the LowLevelILLabel for the given IL address
+ :rtype: LowLevelILLabel
+ """
+ if arch is not None:
+ arch = arch.handle
+ label = core.BNGetLowLevelILLabelForAddress(self.handle, arch, addr)
+ if label is None:
+ return None
+ return LowLevelILLabel(label)
+
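+# Sketch: a common branch-lifting idiom built on the two methods above. If a label already
+# exists for the target address we can emit a direct ``goto``; otherwise we fall back to a
+# ``jump`` to a constant pointer. ``arch`` and ``target`` are assumed inputs from the lifter.
+def lift_branch(il, arch, target):
+    label = il.get_label_for_address(arch, target)
+    if label is not None:
+        il.append(il.goto(label))
+    else:
+        il.append(il.jump(il.const_pointer(arch.address_size, target)))
+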
+[docs] def get_ssa_instruction_index(self, instr):
+ return core.BNGetLowLevelILSSAInstructionIndex(self.handle, instr)
+
+[docs] def get_non_ssa_instruction_index(self, instr):
+ return core.BNGetLowLevelILNonSSAInstructionIndex(self.handle, instr)
+
+[docs] def get_ssa_reg_definition(self, reg_ssa):
+ reg = self.arch.get_reg_index(reg_ssa.reg)
+ result = core.BNGetLowLevelILSSARegisterDefinition(self.handle, reg, reg_ssa.version)
+ if result >= core.BNGetLowLevelILInstructionCount(self.handle):
+ return None
+ return result
+
+[docs] def get_ssa_flag_definition(self, flag_ssa):
+ flag = self.arch.get_flag_index(flag_ssa.flag)
+ result = core.BNGetLowLevelILSSAFlagDefinition(self.handle, flag, flag_ssa.version)
+ if result >= core.BNGetLowLevelILInstructionCount(self.handle):
+ return None
+ return result
+
+[docs] def get_ssa_memory_definition(self, index):
+ result = core.BNGetLowLevelILSSAMemoryDefinition(self.handle, index)
+ if result >= core.BNGetLowLevelILInstructionCount(self.handle):
+ return None
+ return result
+
+[docs] def get_ssa_reg_uses(self, reg_ssa):
+ reg = self.arch.get_reg_index(reg_ssa.reg)
+ count = ctypes.c_ulonglong()
+ instrs = core.BNGetLowLevelILSSARegisterUses(self.handle, reg, reg_ssa.version, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
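+# Sketch: querying SSA definitions and uses on the SSA form of a lifted function. ``func`` is
+# assumed to be a binaryninja.function.Function; ``SSARegister`` is defined earlier in this
+# module, and the register name "eax" and version number are illustrative assumptions.
+def show_ssa_def_and_uses(func):
+    ssa = func.low_level_il.ssa_form
+    reg_ssa = SSARegister("eax", 1)                   # version 1 of eax
+    definition = ssa.get_ssa_reg_definition(reg_ssa)  # defining instruction index, or None
+    uses = ssa.get_ssa_reg_uses(reg_ssa)              # list of instruction indexes
+    print(definition, uses)
+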
+[docs] def get_ssa_flag_uses(self, flag_ssa):
+ flag = self.arch.get_flag_index(flag_ssa.flag)
+ count = ctypes.c_ulonglong()
+ instrs = core.BNGetLowLevelILSSAFlagUses(self.handle, flag, flag_ssa.version, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_ssa_memory_uses(self, index):
+ count = ctypes.c_ulonglong()
+ instrs = core.BNGetLowLevelILSSAMemoryUses(self.handle, index, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_ssa_reg_value(self, reg_ssa):
+ reg = self.arch.get_reg_index(reg_ssa.reg)
+ value = core.BNGetLowLevelILSSARegisterValue(self.handle, reg, reg_ssa.version)
+ result = binaryninja.function.RegisterValue(self.arch, value)
+ return result
+
+[docs] def get_ssa_flag_value(self, flag_ssa):
+ flag = self.arch.get_flag_index(flag_ssa.flag)
+ value = core.BNGetLowLevelILSSAFlagValue(self.handle, flag, flag_ssa.version)
+ result = binaryninja.function.RegisterValue(self.arch, value)
+ return result
+
+[docs] def get_medium_level_il_instruction_index(self, instr):
+ med_il = self.medium_level_il
+ if med_il is None:
+ return None
+ result = core.BNGetMediumLevelILInstructionIndex(self.handle, instr)
+ if result >= core.BNGetMediumLevelILInstructionCount(med_il.handle):
+ return None
+ return result
+
+[docs] def get_medium_level_il_expr_index(self, expr):
+ med_il = self.medium_level_il
+ if med_il is None:
+ return None
+ result = core.BNGetMediumLevelILExprIndex(self.handle, expr)
+ if result >= core.BNGetMediumLevelILExprCount(med_il.handle):
+ return None
+ return result
+
+[docs] def get_mapped_medium_level_il_instruction_index(self, instr):
+ med_il = self.mapped_medium_level_il
+ if med_il is None:
+ return None
+ result = core.BNGetMappedMediumLevelILInstructionIndex(self.handle, instr)
+ if result >= core.BNGetMediumLevelILInstructionCount(med_il.handle):
+ return None
+ return result
+
+[docs] def get_mapped_medium_level_il_expr_index(self, expr):
+ med_il = self.mapped_medium_level_il
+ if med_il is None:
+ return None
+ result = core.BNGetMappedMediumLevelILExprIndex(self.handle, expr)
+ if result >= core.BNGetMediumLevelILExprCount(med_il.handle):
+ return None
+ return result
+
+[docs] def create_graph(self, settings = None):
+ if settings is not None:
+ settings_obj = settings.handle
+ else:
+ settings_obj = None
+ return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateLowLevelILFunctionGraph(self.handle, settings_obj))
+
+
+[docs]class LowLevelILBasicBlock(basicblock.BasicBlock):
+[docs] def __init__(self, view, handle, owner):
+ super(LowLevelILBasicBlock, self).__init__(handle, view)
+ self.il_function = owner
+
+ def __iter__(self):
+ for idx in range(self.start, self.end):
+ yield self.il_function[idx]
+
+ def __getitem__(self, idx):
+ size = self.end - self.start
+ if idx > size or idx < -size:
+ raise IndexError("list index is out of range")
+ if idx >= 0:
+ return self.il_function[idx + self.start]
+ else:
+ return self.il_function[self.end + idx]
+
+ def _create_instance(self, handle, view):
+ """Internal method by super to instantiate child instances"""
+ return LowLevelILBasicBlock(view, handle, self.il_function)
+
+ def __hash__(self):
+ return hash((self.start, self.end, self.il_function))
+
+
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja import scriptingprovider
+from binaryninja import plugin
+
+
+[docs]def execute_on_main_thread(func):
+ action = scriptingprovider._ThreadActionContext(func)
+ obj = core.BNExecuteOnMainThread(0, action.callback)
+ if obj:
+ return plugin.MainThreadAction(obj)
+ return None
+
+
+[docs]def execute_on_main_thread_and_wait(func):
+ action = scriptingprovider._ThreadActionContext(func)
+ core.BNExecuteOnMainThreadAndWait(0, action.callback)
+
+
+[docs]def worker_enqueue(func):
+ action = scriptingprovider._ThreadActionContext(func)
+ core.BNWorkerEnqueue(0, action.callback)
+
+
+[docs]def worker_priority_enqueue(func):
+ action = scriptingprovider._ThreadActionContext(func)
+ core.BNWorkerPriorityEnqueue(0, action.callback)
+
+
+[docs]def worker_interactive_enqueue(func):
+ action = scriptingprovider._ThreadActionContext(func)
+ core.BNWorkerInteractiveEnqueue(0, action.callback)
+
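+# Usage sketch for the thread helpers above: the callable takes no arguments, so anything it
+# needs must be captured in a closure. ``execute_on_main_thread_and_wait`` blocks until the
+# callable has run on the main/UI thread; ``worker_enqueue`` runs it on a background worker.
+def _log_from_main_thread():
+    print("running on the main thread")
+
+def _background_task():
+    print("running on a worker thread")
+
+execute_on_main_thread_and_wait(_log_from_main_thread)
+worker_enqueue(_background_task)
+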
+
+
+# Copyright (c) 2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+import struct
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import MediumLevelILOperation, InstructionTextTokenType, ILBranchDependence
+from binaryninja import basicblock #required for MediumLevelILBasicBlock argument
+from binaryninja import function
+from binaryninja import types
+from binaryninja import lowlevelil
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class SSAVariable(object):
+[docs] def __init__(self, var, version):
+ self.var = var
+ self.version = version
+
+ def __repr__(self):
+ return "<ssa %s version %d>" % (repr(self.var), self.version)
+
+ def __eq__(self, other):
+ return isinstance(other, SSAVariable) and (
+ (self.var, self.version) ==
+ (other.var, other.version)
+ )
+
+ def __hash__(self):
+ return hash((self.var, self.version))
+
+
+[docs]class MediumLevelILLabel(object):
+[docs] def __init__(self, handle = None):
+ if handle is None:
+ self.handle = (core.BNMediumLevelILLabel * 1)()
+ core.BNMediumLevelILInitLabel(self.handle)
+ else:
+ self.handle = handle
+
+
+[docs]class MediumLevelILOperationAndSize(object):
+[docs] def __init__(self, operation, size):
+ self.operation = operation
+ self.size = size
+
+ def __repr__(self):
+ if self.size == 0:
+ return "<%s>" % self.operation.name
+ return "<%s %d>" % (self.operation.name, self.size)
+
+ def __eq__(self, other):
+ if isinstance(other, MediumLevelILOperation):
+ return other == self.operation
+ if isinstance(other, MediumLevelILOperationAndSize):
+ return other.size == self.size and other.operation == self.operation
+ else:
+ return False
+
+
+[docs]class MediumLevelILInstruction(object):
+ """
+ ``class MediumLevelILInstruction`` Medium Level Intermediate Language Instructions are infinite length tree-based
+ instructions. Tree-based instructions use infix notation with the left hand operand being the destination operand.
+ Infix notation is thus more natural to read than other notations (e.g. x86 ``mov eax, 0`` vs. MLIL ``eax = 0``).
+ """
+
+ ILOperations = {
+ MediumLevelILOperation.MLIL_NOP: [],
+ MediumLevelILOperation.MLIL_SET_VAR: [("dest", "var"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_FIELD: [("dest", "var"), ("offset", "int"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_SPLIT: [("high", "var"), ("low", "var"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_LOAD: [("src", "expr")],
+ MediumLevelILOperation.MLIL_LOAD_STRUCT: [("src", "expr"), ("offset", "int")],
+ MediumLevelILOperation.MLIL_STORE: [("dest", "expr"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_STORE_STRUCT: [("dest", "expr"), ("offset", "int"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_VAR: [("src", "var")],
+ MediumLevelILOperation.MLIL_VAR_FIELD: [("src", "var"), ("offset", "int")],
+ MediumLevelILOperation.MLIL_VAR_SPLIT: [("high", "var"), ("low", "var")],
+ MediumLevelILOperation.MLIL_ADDRESS_OF: [("src", "var")],
+ MediumLevelILOperation.MLIL_ADDRESS_OF_FIELD: [("src", "var"), ("offset", "int")],
+ MediumLevelILOperation.MLIL_CONST: [("constant", "int")],
+ MediumLevelILOperation.MLIL_CONST_PTR: [("constant", "int")],
+ MediumLevelILOperation.MLIL_EXTERN_PTR: [("constant", "int"), ("offset", "int")],
+ MediumLevelILOperation.MLIL_FLOAT_CONST: [("constant", "float")],
+ MediumLevelILOperation.MLIL_IMPORT: [("constant", "int")],
+ MediumLevelILOperation.MLIL_ADD: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_ADC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ MediumLevelILOperation.MLIL_SUB: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_SBB: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ MediumLevelILOperation.MLIL_AND: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_OR: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_XOR: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_LSL: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_LSR: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_ASR: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_ROL: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_RLC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ MediumLevelILOperation.MLIL_ROR: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_RRC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
+ MediumLevelILOperation.MLIL_MUL: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_MULU_DP: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_MULS_DP: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_DIVU: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_DIVU_DP: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_DIVS: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_DIVS_DP: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_MODU: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_MODU_DP: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_MODS: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_MODS_DP: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_NEG: [("src", "expr")],
+ MediumLevelILOperation.MLIL_NOT: [("src", "expr")],
+ MediumLevelILOperation.MLIL_SX: [("src", "expr")],
+ MediumLevelILOperation.MLIL_ZX: [("src", "expr")],
+ MediumLevelILOperation.MLIL_LOW_PART: [("src", "expr")],
+ MediumLevelILOperation.MLIL_JUMP: [("dest", "expr")],
+ MediumLevelILOperation.MLIL_JUMP_TO: [("dest", "expr"), ("targets", "int_list")],
+ MediumLevelILOperation.MLIL_RET_HINT: [("dest", "expr")],
+ MediumLevelILOperation.MLIL_CALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
+ MediumLevelILOperation.MLIL_CALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
+ MediumLevelILOperation.MLIL_CALL_OUTPUT: [("dest", "var_list")],
+ MediumLevelILOperation.MLIL_CALL_PARAM: [("src", "var_list")],
+ MediumLevelILOperation.MLIL_RET: [("src", "expr_list")],
+ MediumLevelILOperation.MLIL_NORET: [],
+ MediumLevelILOperation.MLIL_IF: [("condition", "expr"), ("true", "int"), ("false", "int")],
+ MediumLevelILOperation.MLIL_GOTO: [("dest", "int")],
+ MediumLevelILOperation.MLIL_CMP_E: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_NE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_SLT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_ULT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_SLE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_ULE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_SGE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_UGE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_SGT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_CMP_UGT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_TEST_BIT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_BOOL_TO_INT: [("src", "expr")],
+ MediumLevelILOperation.MLIL_ADD_OVERFLOW: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_SYSCALL: [("output", "var_list"), ("params", "expr_list")],
+ MediumLevelILOperation.MLIL_SYSCALL_UNTYPED: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
+ MediumLevelILOperation.MLIL_TAILCALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
+ MediumLevelILOperation.MLIL_TAILCALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
+ MediumLevelILOperation.MLIL_BP: [],
+ MediumLevelILOperation.MLIL_TRAP: [("vector", "int")],
+ MediumLevelILOperation.MLIL_INTRINSIC: [("output", "var_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
+ MediumLevelILOperation.MLIL_INTRINSIC_SSA: [("output", "var_ssa_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
+ MediumLevelILOperation.MLIL_FREE_VAR_SLOT: [("dest", "var")],
+ MediumLevelILOperation.MLIL_FREE_VAR_SLOT_SSA: [("prev", "var_ssa_dest_and_src")],
+ MediumLevelILOperation.MLIL_UNDEF: [],
+ MediumLevelILOperation.MLIL_UNIMPL: [],
+ MediumLevelILOperation.MLIL_UNIMPL_MEM: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FADD: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FSUB: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FMUL: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FDIV: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FSQRT: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FNEG: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FABS: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FLOAT_TO_INT: [("src", "expr")],
+ MediumLevelILOperation.MLIL_INT_TO_FLOAT: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FLOAT_CONV: [("src", "expr")],
+ MediumLevelILOperation.MLIL_ROUND_TO_INT: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FLOOR: [("src", "expr")],
+ MediumLevelILOperation.MLIL_CEIL: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FTRUNC: [("src", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_E: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_NE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_LT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_LE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_GE: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_GT: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_O: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_FCMP_UO: [("left", "expr"), ("right", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_SSA: [("dest", "var_ssa"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_ALIASED: [("prev", "var_ssa_dest_and_src"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_VAR_SSA: [("src", "var_ssa")],
+ MediumLevelILOperation.MLIL_VAR_SSA_FIELD: [("src", "var_ssa"), ("offset", "int")],
+ MediumLevelILOperation.MLIL_VAR_ALIASED: [("src", "var_ssa")],
+ MediumLevelILOperation.MLIL_VAR_ALIASED_FIELD: [("src", "var_ssa"), ("offset", "int")],
+ MediumLevelILOperation.MLIL_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa")],
+ MediumLevelILOperation.MLIL_CALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
+ MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
+ MediumLevelILOperation.MLIL_SYSCALL_SSA: [("output", "expr"), ("params", "expr_list"), ("src_memory", "int")],
+ MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
+ MediumLevelILOperation.MLIL_TAILCALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
+ MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
+ MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA: [("dest_memory", "int"), ("dest", "var_ssa_list")],
+ MediumLevelILOperation.MLIL_CALL_PARAM_SSA: [("src_memory", "int"), ("src", "var_ssa_list")],
+ MediumLevelILOperation.MLIL_LOAD_SSA: [("src", "expr"), ("src_memory", "int")],
+ MediumLevelILOperation.MLIL_LOAD_STRUCT_SSA: [("src", "expr"), ("offset", "int"), ("src_memory", "int")],
+ MediumLevelILOperation.MLIL_STORE_SSA: [("dest", "expr"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_STORE_STRUCT_SSA: [("dest", "expr"), ("offset", "int"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
+ MediumLevelILOperation.MLIL_VAR_PHI: [("dest", "var_ssa"), ("src", "var_ssa_list")],
+ MediumLevelILOperation.MLIL_MEM_PHI: [("dest_memory", "int"), ("src_memory", "int_list")]
+ }
+
+[docs] def __init__(self, func, expr_index, instr_index=None):
+ instr = core.BNGetMediumLevelILByIndex(func.handle, expr_index)
+ self.function = func
+ self.expr_index = expr_index
+ if instr_index is None:
+ self.instr_index = core.BNGetMediumLevelILInstructionForExpr(func.handle, expr_index)
+ else:
+ self.instr_index = instr_index
+ self.operation = MediumLevelILOperation(instr.operation)
+ self.size = instr.size
+ self.address = instr.address
+ self.source_operand = instr.sourceOperand
+ operands = MediumLevelILInstruction.ILOperations[instr.operation]
+ self.operands = []
+ i = 0
+ for operand in operands:
+ name, operand_type = operand
+ if operand_type == "int":
+ value = instr.operands[i]
+ value = (value & ((1 << 63) - 1)) - (value & (1 << 63))
+ elif operand_type == "float":
+ if instr.size == 4:
+ value = struct.unpack("f", struct.pack("I", instr.operands[i] & 0xffffffff))[0]
+ elif instr.size == 8:
+ value = struct.unpack("d", struct.pack("Q", instr.operands[i]))[0]
+ else:
+ value = instr.operands[i]
+ elif operand_type == "expr":
+ value = MediumLevelILInstruction(func, instr.operands[i])
+ elif operand_type == "intrinsic":
+ value = lowlevelil.ILIntrinsic(func.arch, instr.operands[i])
+ elif operand_type == "var":
+ value = function.Variable.from_identifier(self.function.source_function, instr.operands[i])
+ elif operand_type == "var_ssa":
+ var = function.Variable.from_identifier(self.function.source_function, instr.operands[i])
+ version = instr.operands[i + 1]
+ i += 1
+ value = SSAVariable(var, version)
+ elif operand_type == "var_ssa_dest_and_src":
+ var = function.Variable.from_identifier(self.function.source_function, instr.operands[i])
+ dest_version = instr.operands[i + 1]
+ src_version = instr.operands[i + 2]
+ i += 2
+ self.operands.append(SSAVariable(var, dest_version))
+ self.dest = SSAVariable(var, dest_version)
+ value = SSAVariable(var, src_version)
+ elif operand_type == "int_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNMediumLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ value = []
+ for j in range(count.value):
+ value.append(operand_list[j])
+ core.BNMediumLevelILFreeOperandList(operand_list)
+ elif operand_type == "var_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNMediumLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value):
+ value.append(function.Variable.from_identifier(self.function.source_function, operand_list[j]))
+ core.BNMediumLevelILFreeOperandList(operand_list)
+ elif operand_type == "var_ssa_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNMediumLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value // 2):
+ var_id = operand_list[j * 2]
+ var_version = operand_list[(j * 2) + 1]
+ value.append(SSAVariable(function.Variable.from_identifier(self.function.source_function,
+ var_id), var_version))
+ core.BNMediumLevelILFreeOperandList(operand_list)
+ elif operand_type == "expr_list":
+ count = ctypes.c_ulonglong()
+ operand_list = core.BNMediumLevelILGetOperandList(func.handle, self.expr_index, i, count)
+ i += 1
+ value = []
+ for j in range(count.value):
+ value.append(MediumLevelILInstruction(func, operand_list[j]))
+ core.BNMediumLevelILFreeOperandList(operand_list)
+ self.operands.append(value)
+ self.__dict__[name] = value
+ i += 1
+
+ def __str__(self):
+ tokens = self.tokens
+ if tokens is None:
+ return "invalid"
+ result = ""
+ for token in tokens:
+ result += token.text
+ return result
+
+ def __repr__(self):
+ return "<il: %s>" % str(self)
+
+ @property
+ def tokens(self):
+ """MLIL tokens (read-only)"""
+ count = ctypes.c_ulonglong()
+ tokens = ctypes.POINTER(core.BNInstructionTextToken)()
+ if ((self.instr_index is not None) and (self.function.source_function is not None) and
+ (self.expr_index == core.BNGetMediumLevelILIndexForInstruction(self.function.handle, self.instr_index))):
+ if not core.BNGetMediumLevelILInstructionText(self.function.handle, self.function.source_function.handle,
+ self.function.arch.handle, self.instr_index, tokens, count):
+ return None
+ else:
+ if not core.BNGetMediumLevelILExprText(self.function.handle, self.function.arch.handle,
+ self.expr_index, tokens, count):
+ return None
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result
+
+ @property
+ def il_basic_block(self):
+ """IL basic block object containing this expression (read-only) (only available on finalized functions)"""
+ return MediumLevelILBasicBlock(self.function.source_function.view, core.BNGetMediumLevelILBasicBlockForInstruction(self.function.handle, self.instr_index), self.function)
+
+ @property
+ def ssa_form(self):
+ """SSA form of expression (read-only)"""
+ return MediumLevelILInstruction(self.function.ssa_form,
+ core.BNGetMediumLevelILSSAExprIndex(self.function.handle, self.expr_index))
+
+ @property
+ def non_ssa_form(self):
+ """Non-SSA form of expression (read-only)"""
+ return MediumLevelILInstruction(self.function.non_ssa_form,
+ core.BNGetMediumLevelILNonSSAExprIndex(self.function.handle, self.expr_index))
+
+ @property
+ def value(self):
+ """Value of expression if constant or a known value (read-only)"""
+ value = core.BNGetMediumLevelILExprValue(self.function.handle, self.expr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+ @property
+ def possible_values(self):
+ """Possible values of expression using path-sensitive static data flow analysis (read-only)"""
+ value = core.BNGetMediumLevelILPossibleExprValues(self.function.handle, self.expr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+ @property
+ def branch_dependence(self):
+ """Set of branching instructions that must take the true or false path to reach this instruction"""
+ count = ctypes.c_ulonglong()
+ deps = core.BNGetAllMediumLevelILBranchDependence(self.function.handle, self.instr_index, count)
+ result = {}
+ for i in range(0, count.value):
+ result[deps[i].branch] = ILBranchDependence(deps[i].dependence)
+ core.BNFreeILBranchDependenceList(deps)
+ return result
+
+ @property
+ def low_level_il(self):
+ """Low level IL form of this expression"""
+ expr = self.function.get_low_level_il_expr_index(self.expr_index)
+ if expr is None:
+ return None
+ return lowlevelil.LowLevelILInstruction(self.function.low_level_il.ssa_form, expr)
+
+ @property
+ def llil(self):
+ """Alias for low_level_il"""
+ return self.low_level_il
+
+ @property
+ def ssa_memory_version(self):
+ """Version of active memory contents in SSA form for this instruction"""
+ return core.BNGetMediumLevelILSSAMemoryVersionAtILInstruction(self.function.handle, self.instr_index)
+
+ @property
+ def prefix_operands(self):
+ """All operands in the expression tree in prefix order"""
+ result = [MediumLevelILOperationAndSize(self.operation, self.size)]
+ for operand in self.operands:
+ if isinstance(operand, MediumLevelILInstruction):
+ result += operand.prefix_operands
+ else:
+ result.append(operand)
+ return result
+
+ @property
+ def postfix_operands(self):
+ """All operands in the expression tree in postfix order"""
+ result = []
+ for operand in self.operands:
+ if isinstance(operand, MediumLevelILInstruction):
+ result += operand.postfix_operands
+ else:
+ result.append(operand)
+ result.append(MediumLevelILOperationAndSize(self.operation, self.size))
+ return result
+
+ @property
+ def vars_written(self):
+ """List of variables written by instruction"""
+ if self.operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
+ MediumLevelILOperation.MLIL_SET_VAR_SSA, MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
+ MediumLevelILOperation.MLIL_SET_VAR_ALIASED, MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD,
+ MediumLevelILOperation.MLIL_VAR_PHI]:
+ return [self.dest]
+ elif self.operation in [MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA]:
+ return [self.high, self.low]
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL]:
+ return self.output
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
+ MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA,
+ MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA,
+ MediumLevelILOperation.MLIL_TAILCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
+ return self.output.vars_written
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
+ return self.dest
+ return []
+
+ @property
+ def vars_read(self):
+ """List of variables read by instruction"""
+ if self.operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
+ MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SSA,
+ MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA, MediumLevelILOperation.MLIL_SET_VAR_ALIASED]:
+ return self.src.vars_read
+ elif self.operation in [MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
+ MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD]:
+ return [self.prev] + self.src.vars_read
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL,
+ MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_SSA]:
+ result = []
+ for param in self.params:
+ result += param.vars_read
+ return result
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
+ MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
+ return self.params.vars_read
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL_PARAM, MediumLevelILOperation.MLIL_CALL_PARAM_SSA,
+ MediumLevelILOperation.MLIL_VAR_PHI]:
+ return self.src
+ elif self.operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
+ return []
+ result = []
+ for operand in self.operands:
+ if (isinstance(operand, function.Variable)) or (isinstance(operand, SSAVariable)):
+ result.append(operand)
+ elif isinstance(operand, MediumLevelILInstruction):
+ result += operand.vars_read
+ return result
+
+ @property
+ def expr_type(self):
+ """Type of expression"""
+ result = core.BNGetMediumLevelILExprType(self.function.handle, self.expr_index)
+ if result.type:
+ platform = None
+ if self.function.source_function:
+ platform = self.function.source_function.platform
+ return types.Type(result.type, platform = platform, confidence = result.confidence)
+ return None
+
+[docs] def get_ssa_var_possible_values(self, ssa_var):
+ var_data = core.BNVariable()
+ var_data.type = ssa_var.var.source_type
+ var_data.index = ssa_var.var.index
+ var_data.storage = ssa_var.var.storage
+ value = core.BNGetMediumLevelILPossibleSSAVarValues(self.function.handle, var_data, ssa_var.version, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_ssa_var_version(self, var):
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ return core.BNGetMediumLevelILSSAVarVersionAtILInstruction(self.function.handle, var_data, self.instr_index)
+
+[docs] def get_var_for_reg(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ result = core.BNGetMediumLevelILVariableForRegisterAtInstruction(self.function.handle, reg, self.instr_index)
+ return function.Variable(self.function.source_function, result.type, result.index, result.storage)
+
+[docs] def get_var_for_flag(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ result = core.BNGetMediumLevelILVariableForFlagAtInstruction(self.function.handle, flag, self.instr_index)
+ return function.Variable(self.function.source_function, result.type, result.index, result.storage)
+
+[docs] def get_var_for_stack_location(self, offset):
+ result = core.BNGetMediumLevelILVariableForStackLocationAtInstruction(self.function.handle, offset, self.instr_index)
+ return function.Variable(self.function.source_function, result.type, result.index, result.storage)
+
+[docs] def get_reg_value(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetMediumLevelILRegisterValueAtInstruction(self.function.handle, reg, self.instr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_reg_value_after(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetMediumLevelILRegisterValueAfterInstruction(self.function.handle, reg, self.instr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_possible_reg_values(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetMediumLevelILPossibleRegisterValuesAtInstruction(self.function.handle, reg, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_possible_reg_values_after(self, reg):
+ reg = self.function.arch.get_reg_index(reg)
+ value = core.BNGetMediumLevelILPossibleRegisterValuesAfterInstruction(self.function.handle, reg, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_flag_value(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetMediumLevelILFlagValueAtInstruction(self.function.handle, flag, self.instr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_flag_value_after(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetMediumLevelILFlagValueAfterInstruction(self.function.handle, flag, self.instr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_possible_flag_values(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetMediumLevelILPossibleFlagValuesAtInstruction(self.function.handle, flag, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_possible_flag_values_after(self, flag):
+ flag = self.function.arch.get_flag_index(flag)
+ value = core.BNGetMediumLevelILPossibleFlagValuesAfterInstruction(self.function.handle, flag, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_stack_contents(self, offset, size):
+ value = core.BNGetMediumLevelILStackContentsAtInstruction(self.function.handle, offset, size, self.instr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_stack_contents_after(self, offset, size):
+ value = core.BNGetMediumLevelILStackContentsAfterInstruction(self.function.handle, offset, size, self.instr_index)
+ result = function.RegisterValue(self.function.arch, value)
+ return result
+
+[docs] def get_possible_stack_contents(self, offset, size):
+ value = core.BNGetMediumLevelILPossibleStackContentsAtInstruction(self.function.handle, offset, size, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_possible_stack_contents_after(self, offset, size):
+ value = core.BNGetMediumLevelILPossibleStackContentsAfterInstruction(self.function.handle, offset, size, self.instr_index)
+ result = function.PossibleValueSet(self.function.arch, value)
+ core.BNFreePossibleValueSet(value)
+ return result
+
+[docs] def get_branch_dependence(self, branch_instr):
+ return ILBranchDependence(core.BNGetMediumLevelILBranchDependence(self.function.handle, self.instr_index, branch_instr))
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
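+# Sketch: walking MediumLevelILInstruction objects of a function and using a few of the
+# read-only properties defined above. ``func`` is assumed to be a binaryninja.function.Function.
+def dump_mlil(func):
+    for instr in func.medium_level_il.instructions:
+        print("0x%x: %s" % (instr.address, instr))
+        print("  reads:  %s" % instr.vars_read)
+        print("  writes: %s" % instr.vars_written)
+        if instr.operation == MediumLevelILOperation.MLIL_CALL:
+            print("  call target value: %s" % instr.dest.value)
+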
+
+[docs]class MediumLevelILExpr(object):
+ """
+ ``class MediumLevelILExpr`` holds the index of IL Expressions.
+
+ .. note:: This class shouldn't be instantiated directly. Rather the helper members of MediumLevelILFunction should be \
+ used instead.
+ """
+
+
+
+[docs]class MediumLevelILFunction(object):
+ """
+ ``class MediumLevelILFunction`` contains the list of MediumLevelILExpr objects that make up a binaryninja.function. MediumLevelILExpr
+ objects can be added to the MediumLevelILFunction by calling ``append`` and passing the result of the various class
+ methods which return MediumLevelILExpr objects.
+ """
+[docs] def __init__(self, arch = None, handle = None, source_func = None):
+ self.arch = arch
+ self.source_function = source_func
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNMediumLevelILFunction)
+ if self.source_function is None:
+ self.source_function = binaryninja.function.Function(handle = core.BNGetMediumLevelILOwnerFunction(self.handle))
+ if self.arch is None:
+ self.arch = self.source_function.arch
+ else:
+ if self.source_function is None:
+ self.handle = None
+ raise ValueError("IL functions must be created with an associated function")
+ if self.arch is None:
+ self.arch = self.source_function.arch
+ func_handle = self.source_function.handle
+ self.handle = core.BNCreateMediumLevelILFunction(arch.handle, func_handle)
+
+ def __del__(self):
+ if self.handle is not None:
+ core.BNFreeMediumLevelILFunction(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, MediumLevelILFunction):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, MediumLevelILFunction):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def current_address(self):
+ """Current IL Address (read/write)"""
+ return core.BNMediumLevelILGetCurrentAddress(self.handle)
+
+ @current_address.setter
+ def current_address(self, value):
+ core.BNMediumLevelILSetCurrentAddress(self.handle, self.arch.handle, value)
+
+[docs] def set_current_address(self, value, arch = None):
+ if arch is None:
+ arch = self.arch
+ core.BNMediumLevelILSetCurrentAddress(self.handle, arch.handle, value)
+
+ @property
+ def basic_blocks(self):
+ """list of MediumLevelILBasicBlock objects (read-only)"""
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
+ result = []
+ view = None
+ if self.source_function is not None:
+ view = self.source_function.view
+ for i in range(0, count.value):
+ result.append(MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self))
+ core.BNFreeBasicBlockList(blocks, count.value)
+ return result
+
+ @property
+ def instructions(self):
+ """A generator of mlil instructions of the current function"""
+ for block in self.basic_blocks:
+ for i in block:
+ yield i
+
+ @property
+ def ssa_form(self):
+ """Medium level IL in SSA form (read-only)"""
+ result = core.BNGetMediumLevelILSSAForm(self.handle)
+ if not result:
+ return None
+ return MediumLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def non_ssa_form(self):
+ """Medium level IL in non-SSA (default) form (read-only)"""
+ result = core.BNGetMediumLevelILNonSSAForm(self.handle)
+ if not result:
+ return None
+ return MediumLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def low_level_il(self):
+ """Low level IL for this function"""
+ result = core.BNGetLowLevelILForMediumLevelIL(self.handle)
+ if not result:
+ return None
+ return lowlevelil.LowLevelILFunction(self.arch, result, self.source_function)
+
+ @property
+ def llil(self):
+ """Alias for low_level_il"""
+ return self.low_level_il
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __len__(self):
+ return int(core.BNGetMediumLevelILInstructionCount(self.handle))
+
+ def __getitem__(self, i):
+ if isinstance(i, slice) or isinstance(i, tuple):
+ raise IndexError("expected integer instruction index")
+ if isinstance(i, MediumLevelILExpr):
+ return MediumLevelILInstruction(self, i.index)
+ if (i < 0) or (i >= len(self)):
+ raise IndexError("index out of range")
+ return MediumLevelILInstruction(self, core.BNGetMediumLevelILIndexForInstruction(self.handle, i), i)
+
+ def __setitem__(self, i, j):
+ raise IndexError("instruction modification not implemented")
+
+ def __iter__(self):
+ count = ctypes.c_ulonglong()
+ blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
+ view = None
+ if self.source_function is not None:
+ view = self.source_function.view
+ try:
+ for i in range(0, count.value):
+ yield MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self)
+ finally:
+ core.BNFreeBasicBlockList(blocks, count.value)
+
+[docs] def get_instruction_start(self, addr, arch = None):
+ if arch is None:
+ arch = self.arch
+ result = core.BNMediumLevelILGetInstructionStart(self.handle, arch.handle, addr)
+ if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
+ return None
+ return result
+
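+ # A minimal usage sketch (illustration, not part of the module source), assuming the
+ # scripting-console variables ``current_function`` and ``here``:
+ #   >>> mlil = current_function.medium_level_il
+ #   >>> idx = mlil.get_instruction_start(here)
+ #   >>> insn = mlil[idx] if idx is not None else None
+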
+[docs] def expr(self, operation, a = 0, b = 0, c = 0, d = 0, e = 0, size = 0):
+ if isinstance(operation, str):
+ operation = MediumLevelILOperation[operation]
+ elif isinstance(operation, MediumLevelILOperation):
+ operation = operation.value
+ return MediumLevelILExpr(core.BNMediumLevelILAddExpr(self.handle, operation, size, a, b, c, d, e))
+
+[docs] def append(self, expr):
+ """
+ ``append`` adds the MediumLevelILExpr ``expr`` to the current MediumLevelILFunction.
+
+ :param MediumLevelILExpr expr: the MediumLevelILExpr to add to the current MediumLevelILFunction
+ :return: number of MediumLevelILExpr in the current function
+ :rtype: int
+ """
+ return core.BNMediumLevelILAddInstruction(self.handle, expr.index)
+
+[docs] def goto(self, label):
+ """
+ ``goto`` returns a goto expression which jumps to the provided MediumLevelILLabel.
+
+ :param MediumLevelILLabel label: Label to jump to
+ :return: the MediumLevelILExpr that jumps to the provided label
+ :rtype: MediumLevelILExpr
+ """
+ return MediumLevelILExpr(core.BNMediumLevelILGoto(self.handle, label.handle))
+
+[docs] def if_expr(self, operand, t, f):
+ """
+ ``if_expr`` returns an ``if`` expression that evaluates the condition ``operand`` and jumps to the
+ MediumLevelILLabel ``t`` when the condition is non-zero, or to ``f`` when it is zero.
+
+ :param MediumLevelILExpr operand: comparison expression to evaluate.
+ :param MediumLevelILLabel t: Label for the true branch
+ :param MediumLevelILLabel f: Label for the false branch
+ :return: the MediumLevelILExpr for the if expression
+ :rtype: MediumLevelILExpr
+ """
+ return MediumLevelILExpr(core.BNMediumLevelILIf(self.handle, operand.index, t.handle, f.handle))
+
+[docs] def mark_label(self, label):
+ """
+ ``mark_label`` assigns a MediumLevelILLabel to the current IL address.
+
+ :param MediumLevelILLabel label:
+ :rtype: None
+ """
+ core.BNMediumLevelILMarkLabel(self.handle, label.handle)
+
+[docs] def add_label_list(self, labels):
+ """
+ ``add_label_list`` returns a label list expression for the given list of MediumLevelILLabel objects.
+
+ :param list(MediumLevelILLabel) labels: the list of MediumLevelILLabel to get a label list expression from
+ :return: the label list expression
+ :rtype: MediumLevelILExpr
+ """
+ label_list = (ctypes.POINTER(core.BNMediumLevelILLabel) * len(labels))()
+ for i in range(len(labels)):
+ label_list[i] = labels[i].handle
+ return MediumLevelILExpr(core.BNMediumLevelILAddLabelList(self.handle, label_list, len(labels)))
+
+[docs] def add_operand_list(self, operands):
+ """
+ ``add_operand_list`` returns an operand list expression for the given list of integer operands.
+
+ :param list(int) operands: list of operand numbers
+ :return: an operand list expression
+ :rtype: MediumLevelILExpr
+ """
+ operand_list = (ctypes.c_ulonglong * len(operands))()
+ for i in range(len(operands)):
+ operand_list[i] = operands[i]
+ return MediumLevelILExpr(core.BNMediumLevelILAddOperandList(self.handle, operand_list, len(operands)))
+
+[docs] def operand(self, n, expr):
+ """
+ ``operand`` sets the operand number of the expression ``expr`` and passes back ``expr`` without modification.
+
+ :param int n:
+ :param MediumLevelILExpr expr:
+ :return: returns the expression ``expr`` unmodified
+ :rtype: MediumLevelILExpr
+ """
+ core.BNMediumLevelILSetExprSourceOperand(self.handle, expr.index, n)
+ return expr
+
+[docs] def finalize(self):
+ """
+ ``finalize`` ends the function and computes the list of basic blocks.
+
+ :rtype: None
+ """
+ core.BNFinalizeMediumLevelILFunction(self.handle)
+
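+ # A minimal sketch (illustration, not part of the module source) of how the expression
+ # builders above fit together; hand-building MLIL is uncommon. ``MediumLevelILLabel``
+ # (the MLIL label helper, assumed here) and MLIL_CONST/MLIL_NOP from
+ # MediumLevelILOperation are used:
+ #   >>> t, f = MediumLevelILLabel(), MediumLevelILLabel()
+ #   >>> cond = mlil.expr(MediumLevelILOperation.MLIL_CONST, 1, size=4)
+ #   >>> mlil.append(mlil.if_expr(cond, t, f))
+ #   >>> mlil.mark_label(t)
+ #   >>> mlil.append(mlil.expr(MediumLevelILOperation.MLIL_NOP))
+ #   >>> mlil.mark_label(f)
+ #   >>> mlil.finalize()
+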
+[docs] def get_ssa_instruction_index(self, instr):
+ return core.BNGetMediumLevelILSSAInstructionIndex(self.handle, instr)
+
+[docs] def get_non_ssa_instruction_index(self, instr):
+ return core.BNGetMediumLevelILNonSSAInstructionIndex(self.handle, instr)
+
+[docs] def get_ssa_var_definition(self, ssa_var):
+ var_data = core.BNVariable()
+ var_data.type = ssa_var.var.source_type
+ var_data.index = ssa_var.var.index
+ var_data.storage = ssa_var.var.storage
+ result = core.BNGetMediumLevelILSSAVarDefinition(self.handle, var_data, ssa_var.version)
+ if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
+ return None
+ return result
+
+[docs] def get_ssa_memory_definition(self, version):
+ result = core.BNGetMediumLevelILSSAMemoryDefinition(self.handle, version)
+ if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
+ return None
+ return result
+
+[docs] def get_ssa_var_uses(self, ssa_var):
+ count = ctypes.c_ulonglong()
+ var_data = core.BNVariable()
+ var_data.type = ssa_var.var.source_type
+ var_data.index = ssa_var.var.index
+ var_data.storage = ssa_var.var.storage
+ instrs = core.BNGetMediumLevelILSSAVarUses(self.handle, var_data, ssa_var.version, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_ssa_memory_uses(self, version):
+ count = ctypes.c_ulonglong()
+ instrs = core.BNGetMediumLevelILSSAMemoryUses(self.handle, version, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def is_ssa_var_live(self, ssa_var):
+ """
+ ``is_ssa_var_live`` determines if ``ssa_var`` is live at any point in the function
+
+ :param SSAVariable ssa_var: the SSA variable to query
+ :return: whether the variable is live at any point in the function
+ :rtype: bool
+ """
+ var_data = core.BNVariable()
+ var_data.type = ssa_var.var.source_type
+ var_data.index = ssa_var.var.index
+ var_data.storage = ssa_var.var.storage
+ return core.BNIsMediumLevelILSSAVarLive(self.handle, var_data, ssa_var.version)
+
+[docs] def get_var_definitions(self, var):
+ count = ctypes.c_ulonglong()
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ instrs = core.BNGetMediumLevelILVariableDefinitions(self.handle, var_data, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_var_uses(self, var):
+ count = ctypes.c_ulonglong()
+ var_data = core.BNVariable()
+ var_data.type = var.source_type
+ var_data.index = var.index
+ var_data.storage = var.storage
+ instrs = core.BNGetMediumLevelILVariableUses(self.handle, var_data, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(instrs[i])
+ core.BNFreeILInstructionList(instrs)
+ return result
+
+[docs] def get_ssa_var_value(self, ssa_var):
+ var_data = core.BNVariable()
+ var_data.type = ssa_var.var.source_type
+ var_data.index = ssa_var.var.index
+ var_data.storage = ssa_var.var.storage
+ value = core.BNGetMediumLevelILSSAVarValue(self.handle, var_data, ssa_var.version)
+ result = function.RegisterValue(self.arch, value)
+ return result
+
+[docs] def get_low_level_il_instruction_index(self, instr):
+ low_il = self.low_level_il
+ if low_il is None:
+ return None
+ low_il = low_il.ssa_form
+ if low_il is None:
+ return None
+ result = core.BNGetLowLevelILInstructionIndex(self.handle, instr)
+ if result >= core.BNGetLowLevelILInstructionCount(low_il.handle):
+ return None
+ return result
+
+[docs] def get_low_level_il_expr_index(self, expr):
+ low_il = self.low_level_il
+ if low_il is None:
+ return None
+ low_il = low_il.ssa_form
+ if low_il is None:
+ return None
+ result = core.BNGetLowLevelILExprIndex(self.handle, expr)
+ if result >= core.BNGetLowLevelILExprCount(low_il.handle):
+ return None
+ return result
+
+[docs] def create_graph(self, settings = None):
+ if settings is not None:
+ settings_obj = settings.handle
+ else:
+ settings_obj = None
+ return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateMediumLevelILFunctionGraph(self.handle, settings_obj))
+
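+# A minimal usage sketch (illustration, not part of the module source): iterating a
+# MediumLevelILFunction and its SSA form via the properties defined above. The
+# BinaryView ``bv`` is an assumption for illustration only.
+def _example_walk_mlil(bv):
+    for func in bv.functions:
+        mlil = func.medium_level_il
+        for block in mlil.basic_blocks:
+            for insn in block:
+                print("0x%x  %s" % (insn.address, insn))
+        ssa = mlil.ssa_form
+        if ssa is not None:
+            print("%d instructions, %d in SSA form" % (len(mlil), len(ssa)))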
+
+[docs]class MediumLevelILBasicBlock(basicblock.BasicBlock):
+[docs] def __init__(self, view, handle, owner):
+ super(MediumLevelILBasicBlock, self).__init__(handle, view)
+ self.il_function = owner
+
+ def __iter__(self):
+ for idx in range(self.start, self.end):
+ yield self.il_function[idx]
+
+ def __getitem__(self, idx):
+ size = self.end - self.start
+ if idx > size or idx < -size:
+ raise IndexError("list index is out of range")
+ if idx >= 0:
+ return self.il_function[idx + self.start]
+ else:
+ return self.il_function[self.end + idx]
+
+ def _create_instance(self, handle, view):
+ """Internal method by super to instantiate child instances"""
+ return MediumLevelILBasicBlock(view, handle, self.il_function)
+
+ def __hash__(self):
+ return hash((self.start, self.end, self.il_function))
+
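+# A minimal usage sketch (illustration, not part of the module source): a
+# MediumLevelILBasicBlock indexes into its owning IL function, including negative
+# indices, per __getitem__ above.
+def _example_block_bounds(mlil):
+    for block in mlil.basic_blocks:
+        print("[%d, %d) starts with %s and ends with %s" % (block.start, block.end, block[0], block[-1]))
+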
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+from __future__ import absolute_import
+import ctypes
+import numbers
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import MetadataType
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import pyNativeStr
+
+
+[docs]class Metadata(object):
+[docs] def __init__(self, value=None, signed=None, raw=None, handle=None):
+ if handle is not None:
+ self.handle = handle
+ elif isinstance(value, numbers.Integral):
+ if signed:
+ self.handle = core.BNCreateMetadataSignedIntegerData(value)
+ else:
+ self.handle = core.BNCreateMetadataUnsignedIntegerData(value)
+ elif isinstance(value, bool):
+ self.handle = core.BNCreateMetadataBooleanData(value)
+ elif isinstance(value, (str, bytes)):
+ if raw:
+ if isinstance(value, str):
+ value = bytes(bytearray(ord(i) for i in value))
+ buffer = (ctypes.c_ubyte * len(value)).from_buffer_copy(value)
+ self.handle = core.BNCreateMetadataRawData(buffer, len(value))
+ else:
+ self.handle = core.BNCreateMetadataStringData(value)
+ elif isinstance(value, float):
+ self.handle = core.BNCreateMetadataDoubleData(value)
+ elif isinstance(value, (list, tuple)):
+ self.handle = core.BNCreateMetadataOfType(MetadataType.ArrayDataType)
+ for elm in value:
+ md = Metadata(elm, signed, raw)
+ core.BNMetadataArrayAppend(self.handle, md.handle)
+ elif isinstance(value, dict):
+ self.handle = core.BNCreateMetadataOfType(MetadataType.KeyValueDataType)
+ for elm in value:
+ md = Metadata(value[elm], signed, raw)
+ core.BNMetadataSetValueForKey(self.handle, str(elm), md.handle)
+ else:
+ raise ValueError("{} doesn't contain type of: int, bool, str, float, list, dict".format(type(value).__name__))
+
+ @property
+ def value(self):
+ if self.is_integer:
+ return int(self)
+ elif self.is_string:
+ return str(self)
+ elif self.is_raw:
+ return bytes(self)
+ elif self.is_float:
+ return float(self)
+ elif self.is_boolean:
+ return bool(self)
+ elif self.is_array:
+ return list(self)
+ elif self.is_dict:
+ return self.get_dict()
+ raise TypeError()
+
+[docs] def get_dict(self):
+ if not self.is_dict:
+ raise TypeError()
+ result = {}
+ for key in self:
+ result[key] = self[key]
+ return result
+
+ @property
+ def type(self):
+ return MetadataType(core.BNMetadataGetType(self.handle))
+
+ @property
+ def is_integer(self):
+ return self.is_signed_integer or self.is_unsigned_integer
+
+ @property
+ def is_signed_integer(self):
+ return core.BNMetadataIsSignedInteger(self.handle)
+
+ @property
+ def is_unsigned_integer(self):
+ return core.BNMetadataIsUnsignedInteger(self.handle)
+
+ @property
+ def is_float(self):
+ return core.BNMetadataIsDouble(self.handle)
+
+ @property
+ def is_boolean(self):
+ return core.BNMetadataIsBoolean(self.handle)
+
+ @property
+ def is_string(self):
+ return core.BNMetadataIsString(self.handle)
+
+ @property
+ def is_raw(self):
+ return core.BNMetadataIsRaw(self.handle)
+
+ @property
+ def is_array(self):
+ return core.BNMetadataIsArray(self.handle)
+
+ @property
+ def is_dict(self):
+ return core.BNMetadataIsKeyValueStore(self.handle)
+
+[docs] def remove(self, key_or_index):
+ if isinstance(key_or_index, str) and self.is_dict:
+ core.BNMetadataRemoveKey(self.handle, key_or_index)
+ elif isinstance(key_or_index, int) and self.is_array:
+ core.BNMetadataRemoveIndex(self.handle, key_or_index)
+ else:
+ raise TypeError("remove only valid for dict and array objects")
+
+ def __len__(self):
+ if self.is_array or self.is_dict or self.is_string or self.is_raw:
+ return core.BNMetadataSize(self.handle)
+ raise Exception("Metadata object doesn't support len()")
+
+ def __iter__(self):
+ if self.is_array:
+ for i in range(core.BNMetadataSize(self.handle)):
+ yield Metadata(handle=core.BNMetadataGetForIndex(self.handle, i)).value
+ elif self.is_dict:
+ result = core.BNMetadataGetValueStore(self.handle)
+ try:
+ for i in range(result.contents.size):
+ if isinstance(result.contents.keys[i], bytes):
+ yield str(pyNativeStr(result.contents.keys[i]))
+ else:
+ yield result.contents.keys[i]
+ finally:
+ core.BNFreeMetadataValueStore(result)
+ else:
+ raise Exception("Metadata object doesn't support iteration")
+
+ def __getitem__(self, value):
+ if self.is_array:
+ if not isinstance(value, int):
+ raise ValueError("Metadata object only supports integers for indexing")
+ if value >= len(self):
+ raise IndexError("Index value out of range")
+ return Metadata(handle=core.BNMetadataGetForIndex(self.handle, value)).value
+ if self.is_dict:
+ if not isinstance(value, str):
+ raise ValueError("Metadata object only supports strings for indexing")
+ handle = core.BNMetadataGetForKey(self.handle, value)
+ if handle is None:
+ raise KeyError(value)
+ return Metadata(handle=handle).value
+
+ raise NotImplementedError("Metadata object doesn't support indexing")
+
+ def __str__(self):
+ if self.is_string:
+ return str(core.BNMetadataGetString(self.handle))
+ if self.is_raw:
+ length = ctypes.c_ulonglong()
+ length.value = 0
+ native_list = core.BNMetadataGetRaw(self.handle, ctypes.byref(length))
+ out_list = []
+ for i in range(length.value):
+ out_list.append(native_list[i])
+ core.BNFreeMetadataRaw(native_list)
+ return ''.join(chr(a) for a in out_list)
+
+ raise ValueError("Metadata object not a string or raw type")
+
+ def __bytes__(self):
+ return bytes(bytearray(ord(i) for i in self.__str__()))
+
+ def __int__(self):
+ if self.is_signed_integer:
+ return core.BNMetadataGetSignedInteger(self.handle)
+ if self.is_unsigned_integer:
+ return core.BNMetadataGetUnsignedInteger(self.handle)
+
+ raise ValueError("Metadata object not of integer type")
+
+ def __float__(self):
+ if not self.is_float:
+ raise ValueError("Metadata object is not float type")
+ return core.BNMetadataGetDouble(self.handle)
+
+ def __nonzero__(self):
+ if not self.is_boolean:
+ raise ValueError("Metadata object is not boolean type")
+ return core.BNMetadataGetBoolean(self.handle)
+
+ def __eq__(self, other):
+ if isinstance(other, int) and self.is_integer:
+ return int(self) == other
+ elif isinstance(other, str) and (self.is_string or self.is_raw):
+ return str(self) == other
+ elif isinstance(other, float) and self.is_float:
+ return float(self) == other
+ elif isinstance(other, bool) and self.is_boolean:
+ return bool(self) == other
+ elif self.is_array and ((isinstance(other, Metadata) and other.is_array) or isinstance(other, list)):
+ if len(self) != len(other):
+ return False
+ for a, b in zip(self, other):
+ if a != b:
+ return False
+ return True
+ elif self.is_dict and ((isinstance(other, Metadata) and other.is_dict) or isinstance(other, dict)):
+ if len(self) != len(other):
+ return False
+ for a, b in zip(self, other):
+ if a != b or self[a] != other[b]:
+ return False
+ return True
+ elif isinstance(other, Metadata) and self.is_integer and other.is_integer:
+ return int(self) == int(other)
+ elif isinstance(other, Metadata) and (self.is_string or self.is_raw) and (other.is_string or other.is_raw):
+ return str(self) == str(other)
+ elif isinstance(other, Metadata) and self.is_float and other.is_float:
+ return float(self) == float(other)
+ elif isinstance(other, Metadata) and self.is_boolean and other.is_boolean:
+ return bool(self) == bool(other)
+ raise NotImplementedError()
+
+ def __ne__(self, other):
+ if isinstance(other, int) and self.is_integer:
+ return int(self) != other
+ elif isinstance(other, str) and (self.is_string or self.is_raw):
+ return str(self) != other
+ elif isinstance(other, float) and self.is_float:
+ return float(self) != other
+ elif isinstance(other, bool):
+ return bool(self) != other
+ elif self.is_array and ((isinstance(other, Metadata) and other.is_array) or isinstance(other, list)):
+ if len(self) != len(other):
+ return True
+ areEqual = True
+ for a, b in zip(self, other):
+ if a != b:
+ areEqual = False
+ return not areEqual
+ elif self.is_dict and ((isinstance(other, Metadata) and other.is_dict) or isinstance(other, dict)):
+ if len(self) != len(other):
+ return True
+ for a, b in zip(self, other):
+ if a != b or self[a] != other[b]:
+ return True
+ return False
+ elif isinstance(other, Metadata) and self.is_integer and other.is_integer:
+ return int(self) != int(other)
+ elif isinstance(other, Metadata) and (self.is_string or self.is_raw) and (other.is_string or other.is_raw):
+ return str(self) != str(other)
+ elif isinstance(other, Metadata) and self.is_float and other.is_float:
+ return float(self) != float(other)
+ elif isinstance(other, Metadata) and self.is_boolean and other.is_boolean:
+ return bool(self) != bool(other)
+
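+# A minimal usage sketch (illustration, not part of the module source): plain Python
+# values round-trip through the Metadata wrapper above; such objects are typically
+# persisted with BinaryView.store_metadata (an assumption here, not shown in this file).
+def _example_metadata():
+    md = Metadata({"name": "example", "count": 3, "tags": ["a", "b"]})
+    assert md.is_dict
+    print(md["count"])                 # 3
+    print(md.value)                    # back to a plain dict of plain values
+    raw = Metadata("\x00\x01", raw=True)
+    assert raw.is_raw and len(raw) == 2
+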
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja import types
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+
+
+class _PlatformMetaClass(type):
+
+ @property
+ def list(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ platforms = core.BNGetPlatformList(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(Platform(handle = core.BNNewPlatformReference(platforms[i])))
+ core.BNFreePlatformList(platforms, count.value)
+ return result
+
+ @property
+ def os_list(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ platforms = core.BNGetPlatformOSList(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(str(platforms[i]))
+ core.BNFreePlatformOSList(platforms, count.value)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ platforms = core.BNGetPlatformList(count)
+ try:
+ for i in range(0, count.value):
+ yield Platform(handle = core.BNNewPlatformReference(platforms[i]))
+ finally:
+ core.BNFreePlatformList(platforms, count.value)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __getitem__(cls, value):
+ binaryninja._init_plugins()
+ platform = core.BNGetPlatformByName(str(value))
+ if platform is None:
+ raise KeyError("'%s' is not a valid platform" % str(value))
+ return Platform(handle = platform)
+
+ def get_list(cls, os = None, arch = None):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ if os is None:
+ platforms = core.BNGetPlatformList(count)
+ elif arch is None:
+ platforms = core.BNGetPlatformListByOS(os)
+ else:
+ platforms = core.BNGetPlatformListByArchitecture(os, arch.handle)
+ result = []
+ for i in range(0, count.value):
+ result.append(Platform(handle = core.BNNewPlatformReference(platforms[i])))
+ core.BNFreePlatformList(platforms, count.value)
+ return result
+
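+# A minimal usage sketch (illustration, not part of the module source): the metaclass
+# above backs ``Platform.list``, ``Platform.os_list`` and name lookup on the Platform
+# class defined below. The platform name 'linux-x86_64' is an assumption.
+def _example_list_platforms():
+    print(Platform.os_list)                  # e.g. ['linux', 'mac', 'windows', ...]
+    for plat in Platform.list:
+        print(plat.name, plat.arch)
+    return Platform['linux-x86_64']          # raises KeyError for unknown names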
+
+[docs]class Platform(with_metaclass(_PlatformMetaClass, object)):
+ """
+ ``class Platform`` contains all information related to the execution environment of the binary, mainly the
+ calling conventions used.
+ """
+ name = None
+
+[docs] def __init__(self, arch = None, handle = None):
+ if handle is None:
+ if arch is None:
+ self.handle = None
+ raise ValueError("platform must have an associated architecture")
+ self.arch = arch
+ self.handle = core.BNCreatePlatform(arch.handle, self.__class__.name)
+ else:
+ self.handle = handle
+ self.__dict__["name"] = core.BNGetPlatformName(self.handle)
+ self.arch = binaryninja.architecture.CoreArchitecture._from_cache(core.BNGetPlatformArchitecture(self.handle))
+
+ def __del__(self):
+ if self.handle is not None:
+ core.BNFreePlatform(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, Platform):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Platform):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+ @property
+ def default_calling_convention(self):
+ """
+ Default calling convention.
+
+ :getter: returns a CallingConvention object for the default calling convention.
+ :setter: sets the default calling convention
+ :type: CallingConvention
+ """
+ result = core.BNGetPlatformDefaultCallingConvention(self.handle)
+ if result is None:
+ return None
+ return binaryninja.callingconvention.CallingConvention(handle=result)
+
+ @default_calling_convention.setter
+ def default_calling_convention(self, value):
+ core.BNRegisterPlatformDefaultCallingConvention(self.handle, value.handle)
+
+ @property
+ def cdecl_calling_convention(self):
+ """
+ Cdecl calling convention.
+
+ :getter: returns a CallingConvention object for the cdecl calling convention.
+ :setter: sets the cdecl calling convention
+ :type: CallingConvention
+ """
+ result = core.BNGetPlatformCdeclCallingConvention(self.handle)
+ if result is None:
+ return None
+ return binaryninja.callingconvention.CallingConvention(handle=result)
+
+ @cdecl_calling_convention.setter
+ def cdecl_calling_convention(self, value):
+ core.BNRegisterPlatformCdeclCallingConvention(self.handle, value.handle)
+
+ @property
+ def stdcall_calling_convention(self):
+ """
+ Stdcall calling convention.
+
+ :getter: returns a CallingConvention object for the stdcall calling convention.
+ :setter: sets the stdcall calling convention
+ :type: CallingConvention
+ """
+ result = core.BNGetPlatformStdcallCallingConvention(self.handle)
+ if result is None:
+ return None
+ return binaryninja.callingconvention.CallingConvention(handle=result)
+
+ @stdcall_calling_convention.setter
+ def stdcall_calling_convention(self, value):
+ core.BNRegisterPlatformStdcallCallingConvention(self.handle, value.handle)
+
+ @property
+ def fastcall_calling_convention(self):
+ """
+ Fastcall calling convention.
+
+ :getter: returns a CallingConvention object for the fastcall calling convention.
+ :setter: sets the fastcall calling convention
+ :type: CallingConvention
+ """
+ result = core.BNGetPlatformFastcallCallingConvention(self.handle)
+ if result is None:
+ return None
+ return binaryninja.callingconvention.CallingConvention(handle=result)
+
+ @fastcall_calling_convention.setter
+ def fastcall_calling_convention(self, value):
+ core.BNRegisterPlatformFastcallCallingConvention(self.handle, value.handle)
+
+ @property
+ def system_call_convention(self):
+ """
+ System call convention.
+
+ :getter: returns a CallingConvention object for the system call convention.
+ :setter: sets the system call convention
+ :type: CallingConvention
+ """
+ result = core.BNGetPlatformSystemCallConvention(self.handle)
+ if result is None:
+ return None
+ return binaryninja.callingconvention.CallingConvention(handle=result)
+
+ @system_call_convention.setter
+ def system_call_convention(self, value):
+ core.BNSetPlatformSystemCallConvention(self.handle, value.handle)
+
+ @property
+ def calling_conventions(self):
+ """
+ List of platform CallingConvention objects (read-only)
+
+ :getter: returns the list of supported CallingConvention objects
+ :type: list(CallingConvention)
+ """
+ count = ctypes.c_ulonglong()
+ cc = core.BNGetPlatformCallingConventions(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(binaryninja.callingconvention.CallingConvention(handle=core.BNNewCallingConventionReference(cc[i])))
+ core.BNFreeCallingConventionList(cc, count.value)
+ return result
+
+ @property
+ def types(self):
+ """List of platform-specific types (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ type_list = core.BNGetPlatformTypes(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ name = types.QualifiedName._from_core_struct(type_list[i].name)
+ result[name] = types.Type(core.BNNewTypeReference(type_list[i].type), platform = self)
+ core.BNFreeTypeList(type_list, count.value)
+ return result
+
+ @property
+ def variables(self):
+ """List of platform-specific variable definitions (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ type_list = core.BNGetPlatformVariables(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ name = types.QualifiedName._from_core_struct(type_list[i].name)
+ result[name] = types.Type(core.BNNewTypeReference(type_list[i].type), platform = self)
+ core.BNFreeTypeList(type_list, count.value)
+ return result
+
+ @property
+ def functions(self):
+ """List of platform-specific function definitions (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ type_list = core.BNGetPlatformFunctions(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ name = types.QualifiedName._from_core_struct(type_list[i].name)
+ result[name] = types.Type(core.BNNewTypeReference(type_list[i].type), platform = self)
+ core.BNFreeTypeList(type_list, count.value)
+ return result
+
+ @property
+ def system_calls(self):
+ """List of system calls for this platform (read-only)"""
+ count = ctypes.c_ulonglong(0)
+ call_list = core.BNGetPlatformSystemCalls(self.handle, count)
+ result = {}
+ for i in range(0, count.value):
+ name = types.QualifiedName._from_core_struct(call_list[i].name)
+ t = types.Type(core.BNNewTypeReference(call_list[i].type), platform = self)
+ result[call_list[i].number] = (name, t)
+ core.BNFreeSystemCallList(call_list, count.value)
+ return result
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ return "<platform: %s>" % self.name
+
+ def __str__(self):
+ return self.name
+
+[docs] def register(self, os):
+ """
+ ``register`` registers the platform for the given OS name.
+
+ :param str os: OS name to register
+ :rtype: None
+ """
+ core.BNRegisterPlatform(os, self.handle)
+
+[docs] def register_calling_convention(self, cc):
+ """
+ ``register_calling_convention`` registers a new calling convention.
+
+ :param CallingConvention cc: a CallingConvention object to register
+ :rtype: None
+ """
+ core.BNRegisterPlatformCallingConvention(self.handle, cc.handle)
+
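+ # A minimal sketch (illustration, not part of the module source) of defining and
+ # registering a custom platform with the two methods above. The architecture lookup,
+ # ``standalone_platform``, the platform name and the OS string are assumptions:
+ #   >>> arch = binaryninja.architecture.Architecture['x86']
+ #   >>> class MyPlatform(Platform):
+ #   ...     name = "myos-x86"
+ #   >>> plat = MyPlatform(arch)
+ #   >>> plat.default_calling_convention = arch.standalone_platform.default_calling_convention
+ #   >>> plat.register("myos")
+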
+
+
+
+
+[docs] def get_associated_platform_by_address(self, addr):
+ new_addr = ctypes.c_ulonglong()
+ new_addr.value = addr
+ result = core.BNGetAssociatedPlatformByAddress(self.handle, new_addr)
+ return Platform(handle = result), new_addr.value
+
+[docs] def get_type_by_name(self, name):
+ name = types.QualifiedName(name)._get_core_struct()
+ obj = core.BNGetPlatformTypeByName(self.handle, name)
+ if not obj:
+ return None
+ return types.Type(obj, platform = self)
+
+[docs] def get_variable_by_name(self, name):
+ name = types.QualifiedName(name)._get_core_struct()
+ obj = core.BNGetPlatformVariableByName(self.handle, name)
+ if not obj:
+ return None
+ return types.Type(obj, platform = self)
+
+[docs] def get_function_by_name(self, name):
+ name = types.QualifiedName(name)._get_core_struct()
+ obj = core.BNGetPlatformFunctionByName(self.handle, name)
+ if not obj:
+ return None
+ return types.Type(obj, platform = self)
+
+[docs] def get_system_call_name(self, number):
+ return core.BNGetPlatformSystemCallName(self.handle, number)
+
+[docs] def get_system_call_type(self, number):
+ obj = core.BNGetPlatformSystemCallType(self.handle, number)
+ if not obj:
+ return None
+ return types.Type(obj, platform = self)
+
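+ # A minimal sketch (illustration, not part of the module source): querying platform
+ # provided types and system calls with the lookup helpers above. The platform name,
+ # type name and syscall number are assumptions:
+ #   >>> plat = Platform['linux-x86_64']
+ #   >>> plat.get_type_by_name('size_t')
+ #   >>> plat.get_system_call_name(1)
+ #   >>> plat.get_system_call_type(1)
+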
+[docs] def generate_auto_platform_type_id(self, name):
+ name = types.QualifiedName(name)._get_core_struct()
+ return core.BNGenerateAutoPlatformTypeId(self.handle, name)
+
+[docs] def generate_auto_platform_type_ref(self, type_class, name):
+ type_id = self.generate_auto_platform_type_id(name)
+ return types.NamedTypeReference(type_class, type_id, name)
+
+[docs] def get_auto_platform_type_id_source(self):
+ return core.BNGetAutoPlatformTypeIdSource(self.handle)
+
+[docs] def parse_types_from_source(self, source, filename=None, include_dirs=[], auto_type_source=None):
+ """
+ ``parse_types_from_source`` parses the source string and any needed headers, searching for those
+ headers in the optional list of directories provided in ``include_dirs``.
+
+ :param str source: source string to be parsed
+ :param str filename: optional source filename
+ :param list(str) include_dirs: optional list of string filename include directories
+ :param str auto_type_source: optional source of types if used for automatically generated types
+ :return: :py:class:`TypeParserResult` (a SyntaxError is thrown on parse error)
+ :rtype: TypeParserResult
+ :Example:
+
+ >>> platform.parse_types_from_source('int foo;\\nint bar(int x);\\nstruct bas{int x,y;};\\n')
+ ({types: {'bas': <type: struct bas>}, variables: {'foo': <type: int32_t>}, functions:{'bar':
+ <type: int32_t(int32_t x)>}}, '')
+ >>>
+ """
+
+ if filename is None:
+ filename = "input"
+ dir_buf = (ctypes.c_char_p * len(include_dirs))()
+ for i in range(0, len(include_dirs)):
+ dir_buf[i] = include_dirs[i].encode('charmap')
+ parse = core.BNTypeParserResult()
+ errors = ctypes.c_char_p()
+ result = core.BNParseTypesFromSource(self.handle, source, filename, parse, errors, dir_buf,
+ len(include_dirs), auto_type_source)
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ if not result:
+ raise SyntaxError(error_str)
+ type_dict = {}
+ variables = {}
+ functions = {}
+ for i in range(0, parse.typeCount):
+ name = types.QualifiedName._from_core_struct(parse.types[i].name)
+ type_dict[name] = types.Type(core.BNNewTypeReference(parse.types[i].type), platform = self)
+ for i in range(0, parse.variableCount):
+ name = types.QualifiedName._from_core_struct(parse.variables[i].name)
+ variables[name] = types.Type(core.BNNewTypeReference(parse.variables[i].type), platform = self)
+ for i in range(0, parse.functionCount):
+ name = types.QualifiedName._from_core_struct(parse.functions[i].name)
+ functions[name] = types.Type(core.BNNewTypeReference(parse.functions[i].type), platform = self)
+ core.BNFreeTypeParserResult(parse)
+ return types.TypeParserResult(type_dict, variables, functions)
+
+[docs] def parse_types_from_source_file(self, filename, include_dirs=[], auto_type_source=None):
+ """
+ ``parse_types_from_source_file`` parses the source file ``filename`` and any needed headers, searching for
+ those headers in the optional list of directories provided in ``include_dirs``.
+
+ :param str filename: filename of file to be parsed
+ :param list(str) include_dirs: optional list of string filename include directories
+ :param str auto_type_source: optional source of types if used for automatically generated types
+ :return: :py:class:`TypeParserResult` (a SyntaxError is thrown on parse error)
+ :rtype: TypeParserResult
+ :Example:
+
+ >>> file = "/Users/binja/tmp.c"
+ >>> open(file).read()
+ 'int foo;\\nint bar(int x);\\nstruct bas{int x,y;};\\n'
+ >>> platform.parse_types_from_source_file(file)
+ ({types: {'bas': <type: struct bas>}, variables: {'foo': <type: int32_t>}, functions:
+ {'bar': <type: int32_t(int32_t x)>}}, '')
+ >>>
+ """
+ dir_buf = (ctypes.c_char_p * len(include_dirs))()
+ for i in range(0, len(include_dirs)):
+ dir_buf[i] = include_dirs[i].encode('charmap')
+ parse = core.BNTypeParserResult()
+ errors = ctypes.c_char_p()
+ result = core.BNParseTypesFromSourceFile(self.handle, filename, parse, errors, dir_buf,
+ len(include_dirs), auto_type_source)
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ if not result:
+ raise SyntaxError(error_str)
+ type_dict = {}
+ variables = {}
+ functions = {}
+ for i in range(0, parse.typeCount):
+ name = types.QualifiedName._from_core_struct(parse.types[i].name)
+ type_dict[name] = types.Type(core.BNNewTypeReference(parse.types[i].type), platform = self)
+ for i in range(0, parse.variableCount):
+ name = types.QualifiedName._from_core_struct(parse.variables[i].name)
+ variables[name] = types.Type(core.BNNewTypeReference(parse.variables[i].type), platform = self)
+ for i in range(0, parse.functionCount):
+ name = types.QualifiedName._from_core_struct(parse.functions[i].name)
+ functions[name] = types.Type(core.BNNewTypeReference(parse.functions[i].type), platform = self)
+ core.BNFreeTypeParserResult(parse)
+ return types.TypeParserResult(type_dict, variables, functions)
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+import ctypes
+import threading
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import PluginCommandType
+from binaryninja import filemetadata
+from binaryninja import binaryview
+from binaryninja import function
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+
+
+[docs]class PluginCommandContext(object):
+[docs] def __init__(self, view):
+ self.view = view
+ self.address = 0
+ self.length = 0
+ self.function = None
+ self.instruction = None
+
+
+class _PluginCommandMetaClass(type):
+ @property
+ def list(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ commands = core.BNGetAllPluginCommands(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(PluginCommand(commands[i]))
+ core.BNFreePluginCommandList(commands)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ commands = core.BNGetAllPluginCommands(count)
+ try:
+ for i in range(0, count.value):
+ yield PluginCommand(commands[i])
+ finally:
+ core.BNFreePluginCommandList(commands)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
+[docs]class PluginCommand(with_metaclass(_PluginCommandMetaClass, object)):
+ _registered_commands = []
+
+[docs] def __init__(self, cmd):
+ self.command = core.BNPluginCommand()
+ ctypes.memmove(ctypes.byref(self.command), ctypes.byref(cmd), ctypes.sizeof(core.BNPluginCommand))
+ self.name = str(cmd.name)
+ self.description = str(cmd.description)
+ self.type = PluginCommandType(cmd.type)
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+ @classmethod
+ def _default_action(cls, view, action):
+ try:
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ action(view_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _address_action(cls, view, addr, action):
+ try:
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ action(view_obj, addr)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _range_action(cls, view, addr, length, action):
+ try:
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ action(view_obj, addr, length)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _function_action(cls, view, func, action):
+ try:
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ func_obj = function.Function(view_obj, core.BNNewFunctionReference(func))
+ action(view_obj, func_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _low_level_il_function_action(cls, view, func, action):
+ try:
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetLowLevelILOwnerFunction(func))
+ func_obj = binaryninja.lowlevelil.LowLevelILFunction(owner.arch, core.BNNewLowLevelILFunctionReference(func), owner)
+ action(view_obj, func_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _low_level_il_instruction_action(cls, view, func, instr, action):
+ try:
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetLowLevelILOwnerFunction(func))
+ func_obj = binaryninja.lowlevelil.LowLevelILFunction(owner.arch, core.BNNewLowLevelILFunctionReference(func), owner)
+ action(view_obj, func_obj[instr])
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _medium_level_il_function_action(cls, view, func, action):
+ try:
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetMediumLevelILOwnerFunction(func))
+ func_obj = binaryninja.mediumlevelil.MediumLevelILFunction(owner.arch, core.BNNewMediumLevelILFunctionReference(func), owner)
+ action(view_obj, func_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _medium_level_il_instruction_action(cls, view, func, instr, action):
+ try:
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetMediumLevelILOwnerFunction(func))
+ func_obj = binaryninja.mediumlevelil.MediumLevelILFunction(owner.arch, core.BNNewMediumLevelILFunctionReference(func), owner)
+ action(view_obj, func_obj[instr])
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+ @classmethod
+ def _default_is_valid(cls, view, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ return is_valid(view_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _address_is_valid(cls, view, addr, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ return is_valid(view_obj, addr)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _range_is_valid(cls, view, addr, length, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ return is_valid(view_obj, addr, length)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _function_is_valid(cls, view, func, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = binaryninja.filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryninja.binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ func_obj = function.Function(view_obj, core.BNNewFunctionReference(func))
+ return is_valid(view_obj, func_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _low_level_il_function_is_valid(cls, view, func, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetLowLevelILOwnerFunction(func))
+ func_obj = binaryninja.lowlevelil.LowLevelILFunction(owner.arch, core.BNNewLowLevelILFunctionReference(func), owner)
+ return is_valid(view_obj, func_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _low_level_il_instruction_is_valid(cls, view, func, instr, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetLowLevelILOwnerFunction(func))
+ func_obj = binaryninja.lowlevelil.LowLevelILFunction(owner.arch, core.BNNewLowLevelILFunctionReference(func), owner)
+ return is_valid(view_obj, func_obj[instr])
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _medium_level_il_function_is_valid(cls, view, func, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetMediumLevelILOwnerFunction(func))
+ func_obj = binaryninja.mediumlevelil.MediumLevelILFunction(owner.arch, core.BNNewMediumLevelILFunctionReference(func), owner)
+ return is_valid(view_obj, func_obj)
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+ @classmethod
+ def _medium_level_il_instruction_is_valid(cls, view, func, instr, is_valid):
+ try:
+ if is_valid is None:
+ return True
+ file_metadata = filemetadata.FileMetadata(handle = core.BNGetFileForView(view))
+ view_obj = binaryview.BinaryView(file_metadata = file_metadata, handle = core.BNNewViewReference(view))
+ owner = function.Function(view_obj, core.BNGetMediumLevelILOwnerFunction(func))
+ func_obj = binaryninja.mediumlevelil.MediumLevelILFunction(owner.arch, core.BNNewMediumLevelILFunctionReference(func), owner)
+ return is_valid(view_obj, func_obj[instr])
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+ return False
+
+[docs] @classmethod
+ def register(cls, name, description, action, is_valid = None):
+ """
+ ``register`` registers a plugin command to be called with a ``BinaryView`` argument.
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` as an argument
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView))(lambda ctxt, view: cls._default_action(view, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView))(lambda ctxt, view: cls._default_is_valid(view, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommand(name, description, action_obj, is_valid_obj, None)
+
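+ # A minimal sketch (illustration, not part of the module source) of registering a
+ # simple command with ``register`` above. The command name, log message and validity
+ # check are assumptions:
+ #   >>> def say_hello(bv):
+ #   ...     binaryninja.log.log_info("hello from %s" % bv.file.filename)
+ #   >>> PluginCommand.register("Examples\\Say Hello", "Log a greeting", say_hello,
+ #   ...     is_valid=lambda bv: len(bv.functions) > 0)
+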
+[docs] @classmethod
+ def register_for_address(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_address`` registers a plugin command to be called with an address argument.
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and address as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_address`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.c_ulonglong)(lambda ctxt, view, addr: cls._address_action(view, addr, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.c_ulonglong)(lambda ctxt, view, addr: cls._address_is_valid(view, addr, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForAddress(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def register_for_range(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_range`` registers a plugin command to be called with a range argument.
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and ``AddressRange`` as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_range`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.c_ulonglong, ctypes.c_ulonglong)(lambda ctxt, view, addr, length: cls._range_action(view, addr, length, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.c_ulonglong, ctypes.c_ulonglong)(lambda ctxt, view, addr, length: cls._range_is_valid(view, addr, length, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForRange(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def register_for_function(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_function`` registers a plugin command to be called with a function argument.
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and a ``Function`` as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_function`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNFunction))(lambda ctxt, view, func: cls._function_action(view, func, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNFunction))(lambda ctxt, view, func: cls._function_is_valid(view, func, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForFunction(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def register_for_low_level_il_function(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_low_level_il_function`` registers a plugin command to be called with a low level IL function argument.
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and a ``LowLevelILFunction`` as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_low_level_il_function`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNLowLevelILFunction))(lambda ctxt, view, func: cls._low_level_il_function_action(view, func, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNLowLevelILFunction))(lambda ctxt, view, func: cls._low_level_il_function_is_valid(view, func, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForLowLevelILFunction(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def register_for_low_level_il_instruction(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_low_level_il_instruction`` Register a plugin to be called with a low level IL instruction argument
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and a ``LowLevelILInstruction`` as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_low_level_il_instruction`` with the same function name will replace the existing function but will leak the memory of the original plugin.
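+
+ A minimal usage sketch follows (the command name and the ``log_llil_instruction`` callback are illustrative only, not part of the API):
+
+ :Example:
+
+ >>> def log_llil_instruction(view, instr):
+ ... binaryninja.log.log_info("LLIL instruction at 0x%x: %s" % (instr.address, str(instr)))
+ >>> PluginCommand.register_for_low_level_il_instruction("Example Log LLIL Instruction", "Log the current LLIL instruction", log_llil_instruction)
+ >>>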
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNLowLevelILFunction), ctypes.c_ulonglong)(lambda ctxt, view, func, instr: cls._low_level_il_instruction_action(view, func, instr, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNLowLevelILFunction), ctypes.c_ulonglong)(lambda ctxt, view, func, instr: cls._low_level_il_instruction_is_valid(view, func, instr, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForLowLevelILInstruction(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def register_for_medium_level_il_function(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_medium_level_il_function`` Register a plugin to be called with a medium level IL function argument
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and a ``MediumLevelILFunction`` as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_medium_level_il_function`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNMediumLevelILFunction))(lambda ctxt, view, func: cls._medium_level_il_function_action(view, func, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNMediumLevelILFunction))(lambda ctxt, view, func: cls._medium_level_il_function_is_valid(view, func, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForMediumLevelILFunction(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def register_for_medium_level_il_instruction(cls, name, description, action, is_valid = None):
+ """
+ ``register_for_medium_level_il_instruction`` Register a plugin to be called with a medium level IL instruction argument
+
+ :param str name: name of the plugin
+ :param str description: description of the plugin
+ :param action: function to call with the ``BinaryView`` and a ``MediumLevelILInstruction`` as arguments
+ :param is_valid: optional argument of a function passed a ``BinaryView`` to determine whether the plugin should be enabled for that view
+ :rtype: None
+
+ .. warning:: Calling ``register_for_medium_level_il_instruction`` with the same function name will replace the existing function but will leak the memory of the original plugin.
+ """
+ binaryninja._init_plugins()
+ action_obj = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNMediumLevelILFunction), ctypes.c_ulonglong)(lambda ctxt, view, func, instr: cls._medium_level_il_instruction_action(view, func, instr, action))
+ is_valid_obj = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.POINTER(core.BNBinaryView), ctypes.POINTER(core.BNMediumLevelILFunction), ctypes.c_ulonglong)(lambda ctxt, view, func, instr: cls._medium_level_il_instruction_is_valid(view, func, instr, is_valid))
+ cls._registered_commands.append((action_obj, is_valid_obj))
+ core.BNRegisterPluginCommandForMediumLevelILInstruction(name, description, action_obj, is_valid_obj, None)
+
+[docs] @classmethod
+ def get_valid_list(cls, context):
+ """Dict of registered plugins"""
+ commands = cls.list
+ result = {}
+ for cmd in commands:
+ if cmd.is_valid(context):
+ result[cmd.name] = cmd
+ return result
+
+[docs] def is_valid(self, context):
+ if context.view is None:
+ return False
+ if self.command.type == PluginCommandType.DefaultPluginCommand:
+ if not self.command.defaultIsValid:
+ return True
+ return self.command.defaultIsValid(self.command.context, context.view.handle)
+ elif self.command.type == PluginCommandType.AddressPluginCommand:
+ if not self.command.addressIsValid:
+ return True
+ return self.command.addressIsValid(self.command.context, context.view.handle, context.address)
+ elif self.command.type == PluginCommandType.RangePluginCommand:
+ if context.length == 0:
+ return False
+ if not self.command.rangeIsValid:
+ return True
+ return self.command.rangeIsValid(self.command.context, context.view.handle, context.address, context.length)
+ elif self.command.type == PluginCommandType.FunctionPluginCommand:
+ if context.function is None:
+ return False
+ if not self.command.functionIsValid:
+ return True
+ return self.command.functionIsValid(self.command.context, context.view.handle, context.function.handle)
+ elif self.command.type == PluginCommandType.LowLevelILFunctionPluginCommand:
+ if context.function is None:
+ return False
+ if not self.command.lowLevelILFunctionIsValid:
+ return True
+ return self.command.lowLevelILFunctionIsValid(self.command.context, context.view.handle, context.function.handle)
+ elif self.command.type == PluginCommandType.LowLevelILInstructionPluginCommand:
+ if context.instruction is None:
+ return False
+ if not isinstance(context.instruction, binaryninja.lowlevelil.LowLevelILInstruction):
+ return False
+ if not self.command.lowLevelILInstructionIsValid:
+ return True
+ return self.command.lowLevelILInstructionIsValid(self.command.context, context.view.handle,
+ context.instruction.function.handle, context.instruction.instr_index)
+ elif self.command.type == PluginCommandType.MediumLevelILFunctionPluginCommand:
+ if context.function is None:
+ return False
+ if not self.command.mediumLevelILFunctionIsValid:
+ return True
+ return self.command.mediumLevelILFunctionIsValid(self.command.context, context.view.handle, context.function.handle)
+ elif self.command.type == PluginCommandType.MediumLevelILInstructionPluginCommand:
+ if context.instruction is None:
+ return False
+ if not isinstance(context.instruction, binaryninja.mediumlevelil.MediumLevelILInstruction):
+ return False
+ if not self.command.mediumLevelILInstructionIsValid:
+ return True
+ return self.command.mediumLevelILInstructionIsValid(self.command.context, context.view.handle,
+ context.instruction.function.handle, context.instruction.instr_index)
+ return False
+
+[docs] def execute(self, context):
+ if not self.is_valid(context):
+ return
+ if self.command.type == PluginCommandType.DefaultPluginCommand:
+ self.command.defaultCommand(self.command.context, context.view.handle)
+ elif self.command.type == PluginCommandType.AddressPluginCommand:
+ self.command.addressCommand(self.command.context, context.view.handle, context.address)
+ elif self.command.type == PluginCommandType.RangePluginCommand:
+ self.command.rangeCommand(self.command.context, context.view.handle, context.address, context.length)
+ elif self.command.type == PluginCommandType.FunctionPluginCommand:
+ self.command.functionCommand(self.command.context, context.view.handle, context.function.handle)
+ elif self.command.type == PluginCommandType.LowLevelILFunctionPluginCommand:
+ self.command.lowLevelILFunctionCommand(self.command.context, context.view.handle, context.function.handle)
+ elif self.command.type == PluginCommandType.LowLevelILInstructionPluginCommand:
+ self.command.lowLevelILInstructionCommand(self.command.context, context.view.handle,
+ context.instruction.function.handle, context.instruction.instr_index)
+ elif self.command.type == PluginCommandType.MediumLevelILFunctionPluginCommand:
+ self.command.mediumLevelILFunctionCommand(self.command.context, context.view.handle, context.function.handle)
+ elif self.command.type == PluginCommandType.MediumLevelILInstructionPluginCommand:
+ self.command.mediumLevelILInstructionCommand(self.command.context, context.view.handle,
+ context.instruction.function.handle, context.instruction.instr_index)
+
+ def __repr__(self):
+ return "<PluginCommand: %s>" % self.name
+
+
+[docs]class MainThreadAction(object):
+
+
+ def __del__(self):
+ core.BNFreeMainThreadAction(self.handle)
+
+
+
+ @property
+ def done(self):
+ return core.BNIsMainThreadActionDone(self.handle)
+
+
+
+
+[docs]class MainThreadActionHandler(object):
+ _main_thread = None
+
+[docs] def __init__(self):
+ self._cb = core.BNMainThreadCallbacks()
+ self._cb.context = 0
+ self._cb.addAction = self._cb.addAction.__class__(self._add_action)
+
+[docs] def register(self):
+ self.__class__._main_thread = self
+ core.BNRegisterMainThread(self._cb)
+
+ def _add_action(self, ctxt, action):
+ try:
+ self.add_action(MainThreadAction(action))
+ except:
+ binaryninja.log.log_error(traceback.format_exc())
+
+
+
+
+class _BackgroundTaskMetaclass(type):
+ @property
+ def list(self):
+ """List all running background tasks (read-only)"""
+ count = ctypes.c_ulonglong()
+ tasks = core.BNGetRunningBackgroundTasks(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(BackgroundTask(handle=core.BNNewBackgroundTaskReference(tasks[i])))
+ core.BNFreeBackgroundTaskList(tasks, count.value)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ tasks = core.BNGetRunningBackgroundTasks(count)
+ try:
+ for i in range(0, count.value):
+ yield BackgroundTask(handle=core.BNNewBackgroundTaskReference(tasks[i]))
+ finally:
+ core.BNFreeBackgroundTaskList(tasks, count.value)
+
+
+[docs]class BackgroundTask(with_metaclass(_BackgroundTaskMetaclass, object)):
+[docs] def __init__(self, initial_progress_text = "", can_cancel = False, handle = None):
+ if handle is None:
+ self.handle = core.BNBeginBackgroundTask(initial_progress_text, can_cancel)
+ else:
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeBackgroundTask(self.handle)
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+ @property
+ def progress(self):
+ """Text description of the progress of the background task (displayed in status bar of the UI)"""
+ return core.BNGetBackgroundTaskProgressText(self.handle)
+
+ @progress.setter
+ def progress(self, value):
+ core.BNSetBackgroundTaskProgressText(self.handle, str(value))
+
+ @property
+ def can_cancel(self):
+ """Whether the task can be cancelled (read-only)"""
+ return core.BNCanCancelBackgroundTask(self.handle)
+
+ @property
+ def finished(self):
+ """Whether the task has finished"""
+ return core.BNIsBackgroundTaskFinished(self.handle)
+
+ @finished.setter
+ def finished(self, value):
+ if value:
+ self.finish()
+
+
+
+ @property
+ def cancelled(self):
+ """Whether the task has been cancelled"""
+ return core.BNIsBackgroundTaskCancelled(self.handle)
+
+ @cancelled.setter
+ def cancelled(self, value):
+ if value:
+ self.cancel()
+
+
+
+
+[docs]class BackgroundTaskThread(BackgroundTask):
+[docs] def __init__(self, initial_progress_text = "", can_cancel = False):
+ class _Thread(threading.Thread):
+ def __init__(self, task):
+ threading.Thread.__init__(self)
+ self.task = task
+
+ def run(self):
+ self.task.run()
+ self.task.finish()
+ self.task = None
+
+ BackgroundTask.__init__(self, initial_progress_text, can_cancel)
+ self.thread = _Thread(self)
+
+
+
+
+
+
+
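+# A minimal sketch of the usual ``BackgroundTaskThread`` pattern (the class name and
+# progress text below are illustrative only): subclass it, put the long-running work
+# in ``run``, update ``self.progress`` as it goes, and honor ``self.cancelled``.
+class _ExampleBackgroundTask(BackgroundTaskThread):
+ def __init__(self):
+ BackgroundTaskThread.__init__(self, "Example task starting...", can_cancel = True)
+
+ def run(self):
+ for step in range(0, 100):
+ if self.cancelled:
+ break
+ self.progress = "Example task: step %d of 100" % step
+# An instance would then be started via the task's start() helper (elided from this
+# excerpt), which runs ``run`` on the worker thread created in ``BackgroundTaskThread.__init__`` above.
+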
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+
+# 2-3 compatibility
+from binaryninja import range
+
+
+[docs]class RepoPlugin(object):
+ """
+ ``RepoPlugin`` is mostly read-only; however, you can install/uninstall and enable/disable plugins. RepoPlugins are
+ created by parsing the plugins.json in a plugin repository.
+ """
+ from binaryninja.enums import PluginType, PluginUpdateStatus
+[docs] def __init__(self, handle):
+ raise Exception("RepoPlugin temporarily disabled!")
+ self.handle = core.handle_of_type(handle, core.BNRepoPlugin)
+
+ def __del__(self):
+ core.BNFreePlugin(self.handle)
+
+ def __repr__(self):
+ return "<{} {}/{}>".format(self.path, "installed" if self.installed else "not-installed", "enabled" if self.enabled else "disabled")
+
+ @property
+ def path(self):
+ """Relative path from the base of the repository to the actual plugin"""
+ return core.BNPluginGetPath(self.handle)
+
+ @property
+ def installed(self):
+ """Boolean True if the plugin is installed, False otherwise"""
+ return core.BNPluginIsInstalled(self.handle)
+
+ @installed.setter
+ def installed(self, state):
+ if state:
+ return core.BNPluginInstall(self.handle)
+ else:
+ return core.BNPluginUninstall(self.handle)
+
+ @property
+ def enabled(self):
+ """Boolean True if the plugin is currently enabled, False otherwise"""
+ return core.BNPluginIsEnabled(self.handle)
+
+ @enabled.setter
+ def enabled(self, state):
+ if state:
+ return core.BNPluginEnable(self.handle)
+ else:
+ return core.BNPluginDisable(self.handle)
+
+ @property
+ def api(self):
+ """string indicating the API used by the plugin"""
+ return core.BNPluginGetApi(self.handle)
+
+ @property
+ def description(self):
+ """String short description of the plugin"""
+ return core.BNPluginGetDescription(self.handle)
+
+ @property
+ def license(self):
+ """String short license description (ie MIT, BSD, GPLv2, etc)"""
+ return core.BNPluginGetLicense(self.handle)
+
+ @property
+ def license_text(self):
+ """String complete license text for the given plugin"""
+ return core.BNPluginGetLicenseText(self.handle)
+
+ @property
+ def long_description(self):
+ """String long description of the plugin"""
+ return core.BNPluginGetLongdescription(self.handle)
+
+ @property
+ def minimum_version(self):
+ """String minimum version the plugin was tested on"""
+ return core.BNPluginGetMinimimVersions(self.handle)
+
+ @property
+ def name(self):
+ """String name of the plugin"""
+ return core.BNPluginGetName(self.handle)
+
+ @property
+ def plugin_types(self):
+ """List of PluginType enumeration objects indicating the plugin type(s)"""
+ result = []
+ count = ctypes.c_ulonglong(0)
+ plugintypes = core.BNPluginGetPluginTypes(self.handle, count)
+ for i in range(count.value):
+ result.append(PluginType(plugintypes[i]))
+ core.BNFreePluginTypes(plugintypes)
+ return result
+
+ @property
+ def url(self):
+ """String URL of the plugin's git repository"""
+ return core.BNPluginGetUrl(self.handle)
+
+ @property
+ def version(self):
+ """String version of the plugin"""
+ return core.BNPluginGetVersion(self.handle)
+
+ @property
+ def update_status(self):
+ """PluginUpdateStatus enumeration indicating if the plugin is up to date or not"""
+ return PluginUpdateStatus(core.BNPluginGetPluginUpdateStatus(self.handle))
+
+
+[docs]class Repository(object):
+ """
+ ``Repository`` is a read-only class. Use RepositoryManager to Enable/Disable/Install/Uninstall plugins.
+ """
+[docs] def __init__(self, handle):
+ raise Exception("Repository temporarily disabled!")
+ self.handle = core.handle_of_type(handle, core.BNRepository)
+
+ def __del__(self):
+ core.BNFreeRepository(self.handle)
+
+ def __repr__(self):
+ return "<{} - {}/{}>".format(self.path, self.remote_reference, self.local_reference)
+
+ def __getitem__(self, plugin_path):
+ for plugin in self.plugins:
+ if plugin_path == plugin.path:
+ return plugin
+ raise KeyError()
+
+ @property
+ def url(self):
+ """String URL of the git repository where the plugin repository's are stored"""
+ return core.BNRepositoryGetUrl(self.handle)
+
+ @property
+ def path(self):
+ """String local path to store the given plugin repository"""
+ return core.BNRepositoryGetRepoPath(self.handle)
+
+ @property
+ def full_path(self):
+ """String full path the repository"""
+ return core.BNRepositoryGetPluginsPath(self.handle)
+
+ @property
+ def local_reference(self):
+ """String for the local git reference (ie 'master')"""
+ return core.BNRepositoryGetLocalReference(self.handle)
+
+ @property
+ def remote_reference(self):
+ """String for the remote git reference (ie 'origin')"""
+ return core.BNRepositoryGetRemoteReference(self.handle)
+
+ @property
+ def plugins(self):
+ """List of RepoPlugin objects contained within this repository"""
+ pluginlist = []
+ count = ctypes.c_ulonglong(0)
+ result = core.BNRepositoryGetPlugins(self.handle, count)
+ for i in range(count.value):
+ pluginlist.append(RepoPlugin(handle=result[i]))
+ core.BNFreeRepositoryPluginList(result, count.value)
+ del result
+ return pluginlist
+
+ @property
+ def initialized(self):
+ """Boolean True when the repository has been initialized"""
+ return core.BNRepositoryIsInitialized(self.handle)
+
+
+[docs]class RepositoryManager(object):
+ """
+ ``RepositoryManager`` keeps track of all repositories and keeps the enabled_plugins.json file coherent with
+ the plugins that are installed/uninstalled and enabled/disabled.
+ """
+[docs] def __init__(self, handle=None):
+ raise Exception("RepositoryManager temporarily disabled!")
+ self.handle = core.BNGetRepositoryManager()
+
+ def __getitem__(self, repo_path):
+ for repo in self.repositories:
+ if repo_path == repo.path:
+ return repo
+ raise KeyError()
+
+[docs] def check_for_updates(self):
+ """Check for updates for all managed Repository objects"""
+ return core.BNRepositoryManagerCheckForUpdates(self.handle)
+
+ @property
+ def repositories(self):
+ """List of Repository objects being managed"""
+ result = []
+ count = ctypes.c_ulonglong(0)
+ repos = core.BNRepositoryManagerGetRepositories(self.handle, count)
+ for i in range(count.value):
+ result.append(Repository(handle=repos[i]))
+ core.BNFreeRepositoryManagerRepositoriesList(repos)
+ return result
+
+ @property
+ def plugins(self):
+ """List of all RepoPlugins in each repository"""
+ plgs = {}
+ for repo in self.repositories:
+ plgs[repo.path] = repo.plugins
+ return plgs
+
+ @property
+ def default_repository(self):
+ """Gets the default Repository"""
+ binaryninja._init_plugins()
+ return Repository(handle=core.BNRepositoryManagerGetDefaultRepository(self.handle))
+
+[docs] def enable_plugin(self, plugin, install=True, repo=None):
+ """
+ ``enable_plugin`` enables the specified plugin, optionally installing it first if ``install`` is set to
+ True (the default), and optionally using a repository other than the default repository.
+
+ :param RepoPlugin or str plugin: RepoPlugin to enable
+ :param Boolean install: optionally install the plugin first, defaults to True
+ :param Repository or str repo: optional, Repository containing the plugin to enable (defaults to the default repository)
+ :return: Boolean value True if the plugin was successfully enabled, False otherwise
+ :rtype: Boolean
+ :Example:
+
+ >>> mgr = RepositoryManager()
+ >>> mgr.enable_plugin('binaryninja-bookmarks')
+ True
+ >>>
+ """
+ if install:
+ if not self.install_plugin(plugin, repo):
+ return False
+
+ if repo is None:
+ repo = self.default_repository
+ repopath = repo
+ pluginpath = plugin
+ if not isinstance(repo, str):
+ repopath = repo.path
+ if not isinstance(plugin, str):
+ pluginpath = plugin.path
+ return core.BNRepositoryManagerEnablePlugin(self.handle, repopath, pluginpath)
+
+[docs] def disable_plugin(self, plugin, repo=None):
+ """
+ ``disable_plugin`` disables the specified plugin
+
+ :param Repository or str repo: Repository containing the plugin to disable
+ :param RepoPlugin or str plugin: RepoPlugin to disable
+ :return: Boolean value True if the plugin was successfully disabled, False otherwise
+ :rtype: Boolean
+ :Example:
+
+ >>> mgr = RepositoryManager()
+ >>> mgr.disable_plugin('binaryninja-bookmarks')
+ True
+ >>>
+ """
+ if repo is None:
+ repo = self.default_repository
+ repopath = repo
+ pluginpath = plugin
+ if not isinstance(repo, str):
+ repopath = repo.path
+ if not isinstance(plugin, str):
+ pluginpath = plugin.path
+ return core.BNRepositoryManagerDisablePlugin(self.handle, repopath, pluginpath)
+
+[docs] def install_plugin(self, plugin, repo=None):
+ """
+ ``install_plugin`` installs the specified plugin
+
+ :param Repository or str repo: Repository containing the plugin to install
+ :param RepoPlugin or str plugin: RepoPlugin to install
+ :return: Boolean value True if the plugin was successfully installed, False otherwise
+ :rtype: Boolean
+ :Example:
+
+ >>> mgr = RepositoryManager()
+ >>> mgr.install_plugin('binaryninja-bookmarks')
+ True
+ >>>
+ """
+ if repo is None:
+ repo = self.default_repository
+ repopath = repo
+ pluginpath = plugin
+ if not isinstance(repo, str):
+ repopath = repo.path
+ if not isinstance(plugin, str):
+ pluginpath = plugin.path
+ return core.BNRepositoryManagerInstallPlugin(self.handle, repopath, pluginpath)
+
+[docs] def uninstall_plugin(self, plugin, repo=None):
+ """
+ ``uninstall_plugin`` uninstalls the specified plugin
+
+ :param Repository or str repo: Repository containing the plugin to uninstall
+ :param RepoPlugin or str plugin: RepoPlugin to uninstall
+ :return: Boolean value True if the plugin was successfully uninstalled, False otherwise
+ :rtype: Boolean
+ :Example:
+
+ >>> mgr = RepositoryManager()
+ >>> mgr.uninstall_plugin('binaryninja-bookmarks')
+ True
+ >>>
+ """
+ if repo is None:
+ repo = self.default_repository
+ repopath = repo
+ pluginpath = plugin
+ if not isinstance(repo, str):
+ repopath = repo.path
+ if not isinstance(plugin, str):
+ pluginpath = plugin.path
+ return core.BNRepositoryManagerUninstallPlugin(self.handle, repopath, pluginpath)
+
+[docs] def update_plugin(self, plugin, repo=None):
+ """
+ ``update_plugin`` updates the specified plugin
+
+ :param Repository or str repo: Repository containing the plugin to update
+ :param RepoPlugin or str plugin: RepoPlugin to update
+ :return: Boolean value True if the plugin was successfully updated, False otherwise
+ :rtype: Boolean
+ :Example:
+
+ >>> mgr = RepositoryManager()
+ >>> mgr.update_plugin('binaryninja-bookmarks')
+ True
+ >>>
+ """
+ if repo is None:
+ repo = self.default_repository
+ repopath = repo
+ pluginpath = plugin
+ if not isinstance(repo, str):
+ repopath = repo.path
+ if not isinstance(plugin, str):
+ pluginpath = plugin.path
+ return core.BNRepositoryManagerUpdatePlugin(self.handle, repopath, pluginpath)
+
+[docs] def add_repository(self, url=None, repopath=None, localreference="master", remotereference="origin"):
+ """
+ ``add_repository`` adds a new plugin repository for the manager to track.
+
+ :param str url: URL to the git repository where the plugins are stored.
+ :param str repopath: path to where the repository will be stored on disk locally
+ :param str localreference: Optional reference to the local tracking branch, typically "master"
+ :param str remotereference: Optional reference to the remote tracking branch, typically "origin"
+ :return: Boolean value True if the repository was successfully added, False otherwise.
+ :rtype: Boolean
+ :Example:
+
+ >>> mgr = RepositoryManager()
+ >>> mgr.add_repository(url="https://github.com/vector35/community-plugins.git",
+ repopath="myrepo",
+ localreference="master", remotereference="origin")
+ True
+ >>>
+ """
+ if not (isinstance(url, str) and isinstance(repopath, str) and
+ isinstance(localreference, str) and isinstance(remotereference, str)):
+ raise ValueError("Parameter is incorrect type")
+
+ return core.BNRepositoryManagerAddRepository(self.handle, url, repopath, localreference, remotereference)
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import code
+import traceback
+import ctypes
+import threading
+import abc
+import sys
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import ScriptingProviderExecuteResult, ScriptingProviderInputReadyState
+from binaryninja import log
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+
+
+class _ThreadActionContext(object):
+ _actions = []
+
+ def __init__(self, func):
+ self.func = func
+ self.interpreter = None
+ if hasattr(PythonScriptingInstance._interpreter, "value"):
+ self.interpreter = PythonScriptingInstance._interpreter.value
+ self.__class__._actions.append(self)
+ self.callback = ctypes.CFUNCTYPE(None, ctypes.c_void_p)(lambda ctxt: self.execute())
+
+ def execute(self):
+ old_interpreter = None
+ if hasattr(PythonScriptingInstance._interpreter, "value"):
+ old_interpreter = PythonScriptingInstance._interpreter.value
+ PythonScriptingInstance._interpreter.value = self.interpreter
+ try:
+ self.func()
+ finally:
+ PythonScriptingInstance._interpreter.value = old_interpreter
+ self.__class__._actions.remove(self)
+
+
+[docs]class ScriptingOutputListener(object):
+ def _register(self, handle):
+ self._cb = core.BNScriptingOutputListener()
+ self._cb.context = 0
+ self._cb.output = self._cb.output.__class__(self._output)
+ self._cb.error = self._cb.error.__class__(self._error)
+ self._cb.inputReadyStateChanged = self._cb.inputReadyStateChanged.__class__(self._input_ready_state_changed)
+ core.BNRegisterScriptingInstanceOutputListener(handle, self._cb)
+
+ def _unregister(self, handle):
+ core.BNUnregisterScriptingInstanceOutputListener(handle, self._cb)
+
+ def _output(self, ctxt, text):
+ try:
+ self.notify_output(text)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _error(self, ctxt, text):
+ try:
+ self.notify_error(text)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _input_ready_state_changed(self, ctxt, state):
+ try:
+ self.notify_input_ready_state_changed(state)
+ except:
+ log.log_error(traceback.format_exc())
+
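+
+# A minimal sketch of a custom output listener (the class name and log prefix are
+# illustrative only); ``notify_output`` and ``notify_error`` are the hooks invoked
+# by the ``_output`` and ``_error`` wrappers above:
+class _ExampleOutputListener(ScriptingOutputListener):
+ def notify_output(self, text):
+ log.log_info("[script] %s" % text)
+
+ def notify_error(self, text):
+ log.log_error("[script] %s" % text)
+# An instance would be attached to a ScriptingInstance with its
+# register_output_listener method (defined later in this module).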
+
+
+
+
+
+
+
+[docs]class ScriptingInstance(object):
+[docs] def __init__(self, provider, handle = None):
+ if handle is None:
+ self._cb = core.BNScriptingInstanceCallbacks()
+ self._cb.context = 0
+ self._cb.destroyInstance = self._cb.destroyInstance.__class__(self._destroy_instance)
+ self._cb.executeScriptInput = self._cb.executeScriptInput.__class__(self._execute_script_input)
+ self._cb.cancelScriptInput = self._cb.cancelScriptInput.__class__(self._cancel_script_input)
+ self._cb.setCurrentBinaryView = self._cb.setCurrentBinaryView.__class__(self._set_current_binary_view)
+ self._cb.setCurrentFunction = self._cb.setCurrentFunction.__class__(self._set_current_function)
+ self._cb.setCurrentBasicBlock = self._cb.setCurrentBasicBlock.__class__(self._set_current_basic_block)
+ self._cb.setCurrentAddress = self._cb.setCurrentAddress.__class__(self._set_current_address)
+ self._cb.setCurrentSelection = self._cb.setCurrentSelection.__class__(self._set_current_selection)
+ self.handle = core.BNInitScriptingInstance(provider.handle, self._cb)
+ else:
+ self.handle = core.handle_of_type(handle, core.BNScriptingInstance)
+ self.listeners = []
+
+ def __del__(self):
+ core.BNFreeScriptingInstance(self.handle)
+
+ def _destroy_instance(self, ctxt):
+ try:
+ self.perform_destroy_instance()
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _execute_script_input(self, ctxt, text):
+ try:
+ return self.perform_execute_script_input(text)
+ except:
+ log.log_error(traceback.format_exc())
+ return ScriptingProviderExecuteResult.InvalidScriptInput
+
+ def _cancel_script_input(self, ctxt):
+ try:
+ return self.perform_cancel_script_input()
+ except:
+ log.log_error(traceback.format_exc())
+ return ScriptingProviderExecuteResult.ScriptExecutionCancelled
+
+ def _set_current_binary_view(self, ctxt, view):
+ try:
+ if view:
+ view = binaryninja.binaryview.BinaryView(handle = core.BNNewViewReference(view))
+ else:
+ view = None
+ self.perform_set_current_binary_view(view)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _set_current_function(self, ctxt, func):
+ try:
+ if func:
+ func = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
+ else:
+ func = None
+ self.perform_set_current_function(func)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _set_current_basic_block(self, ctxt, block):
+ try:
+ if block:
+ func = core.BNGetBasicBlockFunction(block)
+ if func is None:
+ block = None
+ else:
+ block = binaryninja.basicblock.BasicBlock(core.BNNewBasicBlockReference(block),
+ binaryninja.binaryview.BinaryView(handle = core.BNGetFunctionData(func)))
+ core.BNFreeFunction(func)
+ else:
+ block = None
+ self.perform_set_current_basic_block(block)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _set_current_address(self, ctxt, addr):
+ try:
+ self.perform_set_current_address(addr)
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _set_current_selection(self, ctxt, begin, end):
+ try:
+ self.perform_set_current_selection(begin, end)
+ except:
+ log.log_error(traceback.format_exc())
+
+
+
+[docs] @abc.abstractmethod
+ def perform_execute_script_input(self, text):
+ return ScriptingProviderExecuteResult.InvalidScriptInput
+
+[docs] @abc.abstractmethod
+ def perform_cancel_script_input(self):
+ return ScriptingProviderExecuteResult.ScriptExecutionCancelled
+
+[docs] @abc.abstractmethod
+ def perform_set_current_binary_view(self, view):
+ raise NotImplementedError
+
+[docs] @abc.abstractmethod
+ def perform_set_current_function(self, func):
+ raise NotImplementedError
+
+[docs] @abc.abstractmethod
+ def perform_set_current_basic_block(self, block):
+ raise NotImplementedError
+
+[docs] @abc.abstractmethod
+ def perform_set_current_address(self, addr):
+ raise NotImplementedError
+
+[docs] @abc.abstractmethod
+ def perform_set_current_selection(self, begin, end):
+ raise NotImplementedError
+
+ @property
+ def input_ready_state(self):
+ return core.BNGetScriptingInstanceInputReadyState(self.handle)
+
+ @input_ready_state.setter
+ def input_ready_state(self, value):
+ core.BNNotifyInputReadyStateForScriptingInstance(self.handle, value.value)
+
+
+
+
+
+
+
+
+
+[docs] def set_current_binary_view(self, view):
+ if view is not None:
+ view = view.handle
+ core.BNSetScriptingInstanceCurrentBinaryView(self.handle, view)
+
+[docs] def set_current_function(self, func):
+ if func is not None:
+ func = func.handle
+ core.BNSetScriptingInstanceCurrentFunction(self.handle, func)
+
+[docs] def set_current_basic_block(self, block):
+ if block is not None:
+ block = block.handle
+ core.BNSetScriptingInstanceCurrentBasicBlock(self.handle, block)
+
+[docs] def set_current_address(self, addr):
+ core.BNSetScriptingInstanceCurrentAddress(self.handle, addr)
+
+[docs] def set_current_selection(self, begin, end):
+ core.BNSetScriptingInstanceCurrentSelection(self.handle, begin, end)
+
+[docs] def register_output_listener(self, listener):
+ listener._register(self.handle)
+ self.listeners.append(listener)
+
+[docs] def unregister_output_listener(self, listener):
+ if listener in self.listeners:
+ listener._unregister(self.handle)
+ self.listeners.remove(listener)
+
+
+class _ScriptingProviderMetaclass(type):
+
+ @property
+ def list(self):
+ """List all ScriptingProvider types (read-only)"""
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ types = core.BNGetScriptingProviderList(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(ScriptingProvider(types[i]))
+ core.BNFreeScriptingProviderList(types)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ types = core.BNGetScriptingProviderList(count)
+ try:
+ for i in range(0, count.value):
+ yield ScriptingProvider(types[i])
+ finally:
+ core.BNFreeScriptingProviderList(types)
+
+ def __getitem__(self, value):
+ binaryninja._init_plugins()
+ provider = core.BNGetScriptingProviderByName(str(value))
+ if provider is None:
+ raise KeyError("'%s' is not a valid scripting provider" % str(value))
+ return ScriptingProvider(provider)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
+[docs]class ScriptingProvider(with_metaclass(_ScriptingProviderMetaclass, object)):
+
+ name = None
+ instance_class = None
+ _registered_providers = []
+
+[docs] def __init__(self, handle = None):
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNScriptingProvider)
+ self.__dict__["name"] = core.BNGetScriptingProviderName(handle)
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+
+[docs] def register(self):
+ self._cb = core.BNScriptingProviderCallbacks()
+ self._cb.context = 0
+ self._cb.createInstance = self._cb.createInstance.__class__(self._create_instance)
+ self.handle = core.BNRegisterScriptingProvider(self.__class__.name, self._cb)
+ self.__class__._registered_providers.append(self)
+
+ def _create_instance(self, ctxt):
+ try:
+ result = self.__class__.instance_class(self)
+ if result is None:
+ return None
+ return ctypes.cast(core.BNNewScriptingInstanceReference(result.handle), ctypes.c_void_p).value
+ except:
+ log.log_error(traceback.format_exc())
+ return None
+
+[docs] def create_instance(self):
+ result = core.BNCreateScriptingProviderInstance(self.handle)
+ if result is None:
+ return None
+ return ScriptingInstance(self, handle = result)
+
+
+class _PythonScriptingInstanceOutput(object):
+ def __init__(self, orig, is_error):
+ self.orig = orig
+ self.is_error = is_error
+ self.buffer = ""
+ self.encoding = 'UTF-8'
+ self.errors = None
+ self.mode = 'w'
+ self.name = 'PythonScriptingInstanceOutput'
+ self.newlines = None
+
+ def close(self):
+ pass
+
+ def closed(self):
+ return False
+
+ def flush(self):
+ pass
+
+ def isatty(self):
+ return False
+
+ def next(self):
+ raise IOError("File not open for reading")
+
+ def read(self):
+ raise IOError("File not open for reading")
+
+ def readinto(self):
+ raise IOError("File not open for reading")
+
+ def readlines(self):
+ raise IOError("File not open for reading")
+
+ def seek(self):
+ pass
+
+ def softspace(self):
+ return 0
+
+ def truncate(self):
+ pass
+
+ def tell(self):
+ return self.orig.tell()
+
+ def writelines(self, lines):
+ return self.write('\n'.join(lines))
+
+ def write(self, data):
+ interpreter = None
+ if hasattr(PythonScriptingInstance._interpreter, "value"):
+ interpreter = PythonScriptingInstance._interpreter.value
+
+ if interpreter is None:
+ if log.is_output_redirected_to_log():
+ self.buffer += data
+ while True:
+ i = self.buffer.find('\n')
+ if i == -1:
+ break
+ line = self.buffer[:i]
+ self.buffer = self.buffer[i + 1:]
+
+ if self.is_error:
+ log.log_error(line)
+ else:
+ log.log_info(line)
+ else:
+ self.orig.write(data)
+ else:
+ PythonScriptingInstance._interpreter.value = None
+ try:
+ if self.is_error:
+ interpreter.instance.error(data)
+ else:
+ interpreter.instance.output(data)
+ finally:
+ PythonScriptingInstance._interpreter.value = interpreter
+
+
+class _PythonScriptingInstanceInput(object):
+ def __init__(self, orig):
+ self.orig = orig
+
+ def isatty(self):
+ return False
+
+ def read(self, size):
+ interpreter = None
+ if hasattr(PythonScriptingInstance._interpreter, "value"):
+ interpreter = PythonScriptingInstance._interpreter.value
+
+ if interpreter is None:
+ return self.orig.read(size)
+ else:
+ PythonScriptingInstance._interpreter.value = None
+ try:
+ result = interpreter.read(size)
+ finally:
+ PythonScriptingInstance._interpreter.value = interpreter
+ return result
+
+ def readline(self):
+ interpreter = None
+ if hasattr(PythonScriptingInstance._interpreter, "value"):
+ interpreter = PythonScriptingInstance._interpreter.value
+
+ if interpreter is None:
+ return self.orig.readline()
+ else:
+ result = ""
+ while True:
+ data = interpreter.read(1)
+ result += data
+ if (len(data) == 0) or (data == "\n"):
+ break
+ return result
+
+
+[docs]class PythonScriptingInstance(ScriptingInstance):
+ _interpreter = threading.local()
+
+[docs] class InterpreterThread(threading.Thread):
+ def __init__(self, instance):
+ super(PythonScriptingInstance.InterpreterThread, self).__init__()
+ self.instance = instance
+ self.locals = {"__name__": "__console__", "__doc__": None, "binaryninja": sys.modules[__name__]}
+ self.interpreter = code.InteractiveConsole(self.locals)
+ self.event = threading.Event()
+ self.daemon = True
+
+ # Latest selections from UI
+ self.current_view = None
+ self.current_func = None
+ self.current_block = None
+ self.current_addr = 0
+ self.current_selection_begin = 0
+ self.current_selection_end = 0
+
+ # Selections that were current as of last issued command
+ self.active_view = None
+ self.active_func = None
+ self.active_block = None
+ self.active_addr = 0
+ self.active_selection_begin = 0
+ self.active_selection_end = 0
+
+ self.locals["get_selected_data"] = self.get_selected_data
+ self.locals["write_at_cursor"] = self.write_at_cursor
+
+ self.exit = False
+ self.code = None
+ self.input = ""
+
+ self.interpreter.push("from binaryninja import *")
+
+
+
+
+
+
+
+[docs] def read(self, size):
+ while not self.exit:
+ if len(self.input) > size:
+ result = self.input[:size]
+ self.input = self.input[size:]
+ return result
+ elif len(self.input) > 0:
+ result = self.input
+ self.input = ""
+ return result
+ self.instance.input_ready_state = ScriptingProviderInputReadyState.ReadyForScriptProgramInput
+ self.event.wait()
+ self.event.clear()
+ return ""
+
+[docs] def run(self):
+ while not self.exit:
+ self.event.wait()
+ self.event.clear()
+ if self.exit:
+ break
+ if self.code is not None:
+ self.instance.input_ready_state = ScriptingProviderInputReadyState.NotReadyForInput
+ code = self.code
+ self.code = None
+
+ PythonScriptingInstance._interpreter.value = self
+ try:
+ self.active_view = self.current_view
+ self.active_func = self.current_func
+ self.active_block = self.current_block
+ self.active_addr = self.current_addr
+ self.active_selection_begin = self.current_selection_begin
+ self.active_selection_end = self.current_selection_end
+
+ self.locals["current_view"] = self.active_view
+ self.locals["bv"] = self.active_view
+ self.locals["current_function"] = self.active_func
+ self.locals["current_basic_block"] = self.active_block
+ self.locals["current_address"] = self.active_addr
+ self.locals["here"] = self.active_addr
+ self.locals["current_selection"] = (self.active_selection_begin, self.active_selection_end)
+ if self.active_func is None:
+ self.locals["current_llil"] = None
+ self.locals["current_mlil"] = None
+ else:
+ self.locals["current_llil"] = self.active_func.llil
+ self.locals["current_mlil"] = self.active_func.mlil
+
+ for line in code.split(b'\n'):
+ self.interpreter.push(line.decode('charmap'))
+
+ tryNavigate = True
+ if isinstance(self.locals["here"], str) or isinstance(self.locals["current_address"], str):
+ try:
+ self.locals["here"] = self.active_view.parse_expression(self.locals["here"], self.active_addr)
+ except ValueError as e:
+ sys.stderr.write(str(e))
+ tryNavigate = False
+ if tryNavigate:
+ if self.locals["here"] != self.active_addr:
+ if not self.active_view.file.navigate(self.active_view.file.view, self.locals["here"]):
+ sys.stderr.write("Address 0x%x is not valid for the current view\n" % self.locals["here"])
+ elif self.locals["current_address"] != self.active_addr:
+ if not self.active_view.file.navigate(self.active_view.file.view, self.locals["current_address"]):
+ sys.stderr.write("Address 0x%x is not valid for the current view\n" % self.locals["current_address"])
+ if self.active_view is not None:
+ self.active_view.update_analysis()
+ except:
+ traceback.print_exc()
+ finally:
+ PythonScriptingInstance._interpreter.value = None
+ self.instance.input_ready_state = ScriptingProviderInputReadyState.ReadyForScriptExecution
+
+[docs] def get_selected_data(self):
+ if self.active_view is None:
+ return None
+ length = self.active_selection_end - self.active_selection_begin
+ return self.active_view.read(self.active_selection_begin, length)
+
+[docs] def write_at_cursor(self, data):
+ if self.active_view is None:
+ return 0
+ selected_length = self.active_selection_end - self.active_selection_begin
+ data = str(data)
+ if (len(data) == selected_length) or (selected_length == 0):
+ return self.active_view.write(self.active_selection_begin, data)
+ else:
+ self.active_view.remove(self.active_selection_begin, selected_length)
+ return self.active_view.insert(self.active_selection_begin, data)
+
+[docs] def __init__(self, provider):
+ super(PythonScriptingInstance, self).__init__(provider)
+ self.interpreter = PythonScriptingInstance.InterpreterThread(self)
+ self.interpreter.start()
+ self.queued_input = ""
+ self.input_ready_state = ScriptingProviderInputReadyState.ReadyForScriptExecution
+
+
+
+[docs] @abc.abstractmethod
+ def perform_execute_script_input(self, text):
+ if self.input_ready_state == ScriptingProviderInputReadyState.NotReadyForInput:
+ return ScriptingProviderExecuteResult.InvalidScriptInput
+
+ if self.input_ready_state == ScriptingProviderInputReadyState.ReadyForScriptProgramInput:
+ if len(text) == 0:
+ return ScriptingProviderExecuteResult.SuccessfulScriptExecution
+ self.input_ready_state = ScriptingProviderInputReadyState.NotReadyForInput
+ self.interpreter.add_input(text)
+ return ScriptingProviderExecuteResult.SuccessfulScriptExecution
+
+ try:
+ if isinstance(text, str):
+ result = code.compile_command(text)
+ else:
+ result = code.compile_command(text.decode("charmap"))
+ except:
+ result = False
+
+ if result is None:
+ # Command is not complete, ask for more input
+ return ScriptingProviderExecuteResult.IncompleteScriptInput
+
+ self.input_ready_state = ScriptingProviderInputReadyState.NotReadyForInput
+ self.interpreter.execute(text)
+ return ScriptingProviderExecuteResult.SuccessfulScriptExecution
+
+[docs] @abc.abstractmethod
+ def perform_cancel_script_input(self):
+ for tid, tobj in threading._active.items():
+ if tobj is self.interpreter:
+ if ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(KeyboardInterrupt)) != 1:
+ ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
+ break
+
+[docs] @abc.abstractmethod
+ def perform_set_current_binary_view(self, view):
+ self.interpreter.current_view = view
+
+[docs] @abc.abstractmethod
+ def perform_set_current_function(self, func):
+ self.interpreter.current_func = func
+
+[docs] @abc.abstractmethod
+ def perform_set_current_basic_block(self, block):
+ self.interpreter.current_block = block
+
+[docs] @abc.abstractmethod
+ def perform_set_current_address(self, addr):
+ self.interpreter.current_addr = addr
+
+[docs] @abc.abstractmethod
+ def perform_set_current_selection(self, begin, end):
+ self.interpreter.current_selection_begin = begin
+ self.interpreter.current_selection_end = end
+
+
+[docs]class PythonScriptingProvider(ScriptingProvider):
+ name = "Python"
+ instance_class = PythonScriptingInstance
+
+
+PythonScriptingProvider().register()
+# Wrap stdin/stdout/stderr for Python scripting provider implementation
+original_stdin = sys.stdin
+original_stdout = sys.stdout
+original_stderr = sys.stderr
+
+[docs]def redirect_stdio():
+ sys.stdin = _PythonScriptingInstanceInput(sys.stdin)
+ sys.stdout = _PythonScriptingInstanceOutput(sys.stdout, False)
+ sys.stderr = _PythonScriptingInstanceOutput(sys.stderr, True)
+ sys.excepthook = sys.__excepthook__
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import pyNativeStr
+from binaryninja.enums import SettingsScope
+
+
+[docs]class Settings(object):
+
+
+[docs] def register_group(self, group, title):
+ """
+ ``register_group`` registers a group for use with this Settings registry. Groups provide a simple way to organize settings.
+
+ :param str group: a unique identifier
+ :param str title: a user friendly name appropriate for UI presentation
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> Settings().register_group("solver", "Solver")
+ True
+ >>>
+ """
+ return core.BNSettingsRegisterGroup(self.registry_id, group, title)
+
+[docs] def register_setting(self, id, properties):
+ """
+ ``register_setting`` registers a new setting with this Settings registry.
+
+ :param str id: a unique setting identifier in the form <group>.<id>
+ :param str properties: a JSON string that describes the setting schema
+ :return: True on success, False on failure.
+ :rtype: bool
+ :Example:
+
+ >>> Settings().register_group("solver", "Solver")
+ True
+ >>> Settings().register_setting("solver.basicBlockSlicing", '{"description" : "Enable the basic block slicing in the solver.", "title" : "Basic Block Slicing", "default" : true, "type" : "boolean", "id" : "basicBlockSlicing"}')
+ True
+ """
+ return core.BNSettingsRegisterSetting(self.registry_id, id, properties)
+
+[docs] def update_property(self, id, setting_property):
+ return core.BNSettingsUpdateProperty(self.registry_id, id, setting_property)
+
+
+
+[docs] def copy_value(self, dest_registry_id, id):
+ return core.BNSettingsCopyValue(self.registry_id, dest_registry_id, id)
+
+[docs] def reset(self, id, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsReset(self.registry_id, id, view, scope)
+
+[docs] def reset_all(self, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsResetAll(self.registry_id, view, scope)
+
+[docs] def get_bool(self, id, view = None):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsGetBool(self.registry_id, id, view, None)
+
+[docs] def get_double(self, id, view = None):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsGetDouble(self.registry_id, id, view, None)
+
+[docs] def get_integer(self, id, view = None):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsGetUInt64(self.registry_id, id, view, None)
+
+[docs] def get_string(self, id, view = None):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsGetString(self.registry_id, id, view, None)
+
+[docs] def get_string_list(self, id, view = None):
+ if view is not None:
+ view = view.handle
+ length = ctypes.c_ulonglong()
+ result = core.BNSettingsGetStringList(self.registry_id, id, view, None, ctypes.byref(length))
+ out_list = []
+ for i in range(length.value):
+ out_list.append(pyNativeStr(result[i]))
+ core.BNFreeStringList(result, length)
+ return out_list
+
+[docs] def get_bool_with_scope(self, id, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ c_scope = core.SettingsScopeEnum(scope)
+ result = core.BNSettingsGetBool(self.registry_id, id, view, ctypes.byref(c_scope))
+ return (result, SettingsScope(c_scope.value))
+
+[docs] def get_double_with_scope(self, id, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ c_scope = core.SettingsScopeEnum(scope)
+ result = core.BNSettingsGetDouble(self.registry_id, id, view, ctypes.byref(c_scope))
+ return (result, SettingsScope(c_scope.value))
+
+[docs] def get_integer_with_scope(self, id, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ c_scope = core.SettingsScopeEnum(scope)
+ result = core.BNSettingsGetUInt64(self.registry_id, id, view, ctypes.byref(c_scope))
+ return (result, SettingsScope(c_scope.value))
+
+[docs] def get_string_with_scope(self, id, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ c_scope = core.SettingsScopeEnum(scope)
+ result = core.BNSettingsGetString(self.registry_id, id, view, ctypes.byref(c_scope))
+ return (result, SettingsScope(c_scope.value))
+
+[docs] def get_string_list_with_scope(self, id, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ c_scope = core.SettingsScopeEnum(scope)
+ length = ctypes.c_ulonglong()
+ result = core.BNSettingsGetStringList(self.registry_id, id, view, ctypes.byref(c_scope), ctypes.byref(length))
+ out_list = []
+ for i in range(length.value):
+ out_list.append(pyNativeStr(result[i]))
+ core.BNFreeStringList(result, length)
+ return (out_list, SettingsScope(c_scope.value))
+
+[docs] def set_bool(self, id, value, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsSetBool(self.registry_id, view, scope, id, value)
+
+[docs] def set_double(self, id, value, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsSetDouble(self.registry_id, view, scope, id, value)
+
+[docs] def set_integer(self, id, value, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsSetUInt64(self.registry_id, view, scope, id, value)
+
+[docs] def set_string(self, id, value, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ return core.BNSettingsSetString(self.registry_id, view, scope, id, value)
+
+[docs] def set_string_list(self, id, value, view = None, scope = SettingsScope.SettingsAutoScope):
+ if view is not None:
+ view = view.handle
+ length = ctypes.c_ulonglong()
+ length.value = len(value)
+ string_list = (ctypes.c_char_p * len(value))()
+ for i in range(len(value)):
+ string_list[i] = value[i].encode('charmap')
+ return core.BNSettingsSetStringList(self.registry_id, view, scope, id, string_list, length)
+
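+# A minimal sketch tying the accessors above together (it reuses the illustrative
+# "solver.basicBlockSlicing" setting from the register_setting example; any real use
+# assumes the group and setting have been registered first):
+def _example_settings_roundtrip():
+ settings = Settings()
+ settings.register_group("solver", "Solver")
+ settings.register_setting("solver.basicBlockSlicing", '{"description" : "Enable the basic block slicing in the solver.", "title" : "Basic Block Slicing", "default" : true, "type" : "boolean", "id" : "basicBlockSlicing"}')
+ settings.set_bool("solver.basicBlockSlicing", False)
+ value, scope = settings.get_bool_with_scope("solver.basicBlockSlicing")
+ return value, scope
+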
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+import ctypes
+import abc
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import log
+from binaryninja import databuffer
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import TransformType
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+
+
+class _TransformMetaClass(type):
+ @property
+ def list(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ xforms = core.BNGetTransformTypeList(count)
+ result = []
+ for i in range(0, count.value):
+ result.append(Transform(xforms[i]))
+ core.BNFreeTransformTypeList(xforms)
+ return result
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ xforms = core.BNGetTransformTypeList(count)
+ try:
+ for i in range(0, count.value):
+ yield Transform(xforms[i])
+ finally:
+ core.BNFreeTransformTypeList(xforms)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __getitem__(cls, name):
+ binaryninja._init_plugins()
+ xform = core.BNGetTransformByName(name)
+ if xform is None:
+ raise KeyError("'%s' is not a valid transform" % str(name))
+ return Transform(xform)
+
+ def register(cls):
+ binaryninja._init_plugins()
+ if cls.name is None:
+ raise ValueError("transform 'name' is not defined")
+ if cls.long_name is None:
+ cls.long_name = cls.name
+ if cls.transform_type is None:
+ raise ValueError("transform 'transform_type' is not defined")
+ if cls.group is None:
+ cls.group = ""
+ xform = cls(None)
+ cls._registered_cb = xform._cb
+ xform.handle = core.BNRegisterTransformType(cls.transform_type, cls.name, cls.long_name, cls.group, xform._cb)
+
+
+[docs]class TransformParameter(object):
+[docs] def __init__(self, name, long_name = None, fixed_length = 0):
+ self.name = name
+ if long_name is None:
+ self.long_name = name
+ else:
+ self.long_name = long_name
+ self.fixed_length = fixed_length
+
+ def __repr__(self):
+ return "<TransformParameter: {} fixed length: {}>".format(
+ self.long_name, self.fixed_length
+ )
+
+
+[docs]class Transform(with_metaclass(_TransformMetaClass, object)):
+ transform_type = None
+ name = None
+ long_name = None
+ group = None
+ parameters = []
+ _registered_cb = None
+
+[docs] def __init__(self, handle):
+ if handle is None:
+ self._cb = core.BNCustomTransform()
+ self._cb.context = 0
+ self._cb.getParameters = self._cb.getParameters.__class__(self._get_parameters)
+ self._cb.freeParameters = self._cb.freeParameters.__class__(self._free_parameters)
+ self._cb.decode = self._cb.decode.__class__(self._decode)
+ self._cb.encode = self._cb.encode.__class__(self._encode)
+ self._pending_param_lists = {}
+ self.type = self.__class__.transform_type
+ if not isinstance(self.type, str):
+ self.type = TransformType(self.type)
+ self.name = self.__class__.name
+ self.long_name = self.__class__.long_name
+ self.group = self.__class__.group
+ self.parameters = self.__class__.parameters
+ else:
+ self.handle = handle
+ self.type = TransformType(core.BNGetTransformType(self.handle))
+ self.name = core.BNGetTransformName(self.handle)
+ self.long_name = core.BNGetTransformLongName(self.handle)
+ self.group = core.BNGetTransformGroup(self.handle)
+ count = ctypes.c_ulonglong()
+ params = core.BNGetTransformParameterList(self.handle, count)
+ self.parameters = []
+ for i in range(0, count.value):
+ self.parameters.append(TransformParameter(params[i].name, params[i].longName, params[i].fixedLength))
+ core.BNFreeTransformParameterList(params, count.value)
+
+ def __repr__(self):
+ return "<transform: %s>" % self.name
+
+ def __eq__(self, value):
+ if not isinstance(value, Transform):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Transform):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ def _get_parameters(self, ctxt, count):
+ try:
+ count[0] = len(self.parameters)
+ param_buf = (core.BNTransformParameterInfo * len(self.parameters))()
+ for i in range(0, len(self.parameters)):
+ param_buf[i].name = self.parameters[i].name
+ param_buf[i].longName = self.parameters[i].long_name
+ param_buf[i].fixedLength = self.parameters[i].fixed_length
+ result = ctypes.cast(param_buf, ctypes.c_void_p)
+ self._pending_param_lists[result.value] = (result, param_buf)
+ return result.value
+ except:
+ log.log_error(traceback.format_exc())
+ count[0] = 0
+ return None
+
+ def _free_parameters(self, params, count):
+ try:
+ buf = ctypes.cast(params, ctypes.c_void_p)
+ if buf.value not in self._pending_param_lists:
+ raise ValueError("freeing parameter list that wasn't allocated")
+ del self._pending_param_lists[buf.value]
+ except:
+ log.log_error(traceback.format_exc())
+
+ def _decode(self, ctxt, input_buf, output_buf, params, count):
+ try:
+ input_obj = databuffer.DataBuffer(handle = core.BNDuplicateDataBuffer(input_buf))
+ param_map = {}
+ for i in range(0, count):
+ data = databuffer.DataBuffer(handle = core.BNDuplicateDataBuffer(params[i].value))
+ param_map[params[i].name] = str(data)
+ result = self.perform_decode(str(input_obj), param_map)
+ if result is None:
+ return False
+ result = str(result)
+ core.BNSetDataBufferContents(output_buf, result, len(result))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _encode(self, ctxt, input_buf, output_buf, params, count):
+ try:
+ input_obj = databuffer.DataBuffer(handle = core.BNDuplicateDataBuffer(input_buf))
+ param_map = {}
+ for i in range(0, count):
+ data = databuffer.DataBuffer(handle = core.BNDuplicateDataBuffer(params[i].value))
+ param_map[params[i].name] = str(data)
+ result = self.perform_encode(str(input_obj), param_map)
+ if result is None:
+ return False
+ result = str(result)
+ core.BNSetDataBufferContents(output_buf, result, len(result))
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ @property
+ def list(self):
+ """Allow tab completion to discover metaclass list property"""
+ pass
+
+[docs] @abc.abstractmethod
+ def perform_decode(self, data, params):
+ if self.type == TransformType.InvertingTransform:
+ return self.perform_encode(data, params)
+ return None
+
+
+
+[docs] def decode(self, input_buf, params = {}):
+ input_buf = databuffer.DataBuffer(input_buf)
+ output_buf = databuffer.DataBuffer()
+ keys = list(params.keys())
+ param_buf = (core.BNTransformParameter * len(keys))()
+ for i in range(0, len(keys)):
+ data = databuffer.DataBuffer(params[keys[i]])
+ param_buf[i].name = keys[i]
+ param_buf[i].value = data.handle
+ if not core.BNDecode(self.handle, input_buf.handle, output_buf.handle, param_buf, len(keys)):
+ return None
+ return str(output_buf)
+
+[docs] def encode(self, input_buf, params = {}):
+ input_buf = databuffer.DataBuffer(input_buf)
+ output_buf = databuffer.DataBuffer()
+ keys = list(params.keys())
+ param_buf = (core.BNTransformParameter * len(keys))()
+ for i in range(0, len(keys)):
+ data = databuffer.DataBuffer(params[keys[i]])
+ param_buf[i].name = keys[i]
+ param_buf[i].value = data.handle
+ if not core.BNEncode(self.handle, input_buf.handle, output_buf.handle, param_buf, len(keys)):
+ return None
+ return str(output_buf)
+
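+# --- Illustrative sketch (added to this listing; not part of the upstream module) ---
+# A minimal custom transform, assuming a licensed Binary Ninja core is loaded.
+# XOR is its own inverse, so an InvertingTransform only needs perform_encode;
+# the default perform_decode above falls back to it.
+class ExampleXorTransform(Transform):
+	transform_type = TransformType.InvertingTransform
+	name = "ExampleXOR"
+	long_name = "Example XOR transform"
+	group = "Examples"
+	parameters = [TransformParameter("key")]
+
+	def perform_encode(self, data, params):
+		# XOR every input byte with the repeating key supplied as the "key" parameter
+		key = bytearray(params["key"])
+		return bytes(bytearray(b ^ key[i % len(key)] for i, b in enumerate(bytearray(data))))
+
+# Registration and lookup then go through the metaclass shown above:
+#   ExampleXorTransform.register()
+#   Transform["ExampleXOR"].encode("secret", {"key": "\x42"})
+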
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from __future__ import absolute_import
+max_confidence = 255
+
+import ctypes
+
+# Binary Ninja components
+import binaryninja
+from binaryninja import _binaryninjacore as core
+from binaryninja.enums import SymbolType, SymbolBinding, TypeClass, NamedTypeReferenceClass, InstructionTextTokenType, StructureType, ReferenceType, VariableSourceType
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import pyNativeStr
+
+
+[docs]class QualifiedName(object):
+[docs] def __init__(self, name = []):
+ if isinstance(name, str):
+ self.name = [name]
+ self.byte_name = [name.encode('charmap')]
+ elif isinstance(name, QualifiedName):
+ self.name = name.name
+ self.byte_name = [n.encode('charmap') for n in name.name]
+ else:
+ self.name = [pyNativeStr(i) for i in name]
+ self.byte_name = name
+
+ def __str__(self):
+ return "::".join(self.name)
+
+ def __repr__(self):
+ return repr(str(self))
+
+ def __len__(self):
+ return len(self.name)
+
+ def __hash__(self):
+ return hash(str(self))
+
+ def __eq__(self, other):
+ if isinstance(other, str):
+ return str(self) == other
+ elif isinstance(other, list):
+ return self.name == other
+ elif isinstance(other, QualifiedName):
+ return self.name == other.name
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __lt__(self, other):
+ if isinstance(other, QualifiedName):
+ return self.name < other.name
+ return False
+
+ def __le__(self, other):
+ if isinstance(other, QualifiedName):
+ return self.name <= other.name
+ return False
+
+ def __gt__(self, other):
+ if isinstance(other, QualifiedName):
+ return self.name > other.name
+ return False
+
+ def __ge__(self, other):
+ if isinstance(other, QualifiedName):
+ return self.name >= other.name
+ return False
+
+ def __cmp__(self, other):
+ if self == other:
+ return 0
+ if self < other:
+ return -1
+ return 1
+
+ def __getitem__(self, key):
+ return self.name[key]
+
+ def __iter__(self):
+ return iter(self.name)
+
+ def _get_core_struct(self):
+ result = core.BNQualifiedName()
+ name_list = (ctypes.c_char_p * len(self.name))()
+ for i in range(0, len(self.name)):
+ name_list[i] = self.name[i].encode('charmap')
+ result.name = name_list
+ result.nameCount = len(self.name)
+ return result
+
+ @classmethod
+ def _from_core_struct(cls, name):
+ result = []
+ for i in range(0, name.nameCount):
+ result.append(name.name[i])
+ return QualifiedName(result)
+
+
+[docs]class NameSpace(QualifiedName):
+ def __str__(self):
+ return ":".join(self.name)
+
+ def _get_core_struct(self):
+ result = core.BNNameSpace()
+ name_list = (ctypes.c_char_p * len(self.name))()
+ for i in range(0, len(self.name)):
+ name_list[i] = self.name[i].encode('charmap')
+ result.name = name_list
+ result.nameCount = len(self.name)
+ return result
+
+ @classmethod
+ def _from_core_struct(cls, name):
+ result = []
+ for i in range(0, name.nameCount):
+ result.append(name.name[i])
+ return NameSpace(result)
+
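+# Illustrative sketch (added to this listing, not in the upstream module):
+# QualifiedName joins its components with "::" while NameSpace joins them with
+# ":", and both compare equal to plain strings and to lists of strings.
+_example_qn = QualifiedName(["std", "vector"])
+assert str(_example_qn) == "std::vector" and _example_qn == ["std", "vector"]
+assert str(NameSpace(["a", "b"])) == "a:b"
+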
+[docs]class Symbol(object):
+ """
+ Symbols are defined as one of the following types:
+
+ =========================== ==============================================================
+ SymbolType Description
+ =========================== ==============================================================
+ FunctionSymbol Symbol for Function that exists in the current binary
+ ImportAddressSymbol Symbol defined in the Import Address Table
+ ImportedFunctionSymbol Symbol for Function that is not defined in the current binary
+ DataSymbol Symbol for Data in the current binary
+ ImportedDataSymbol Symbol for Data that is not defined in the current binary
+ ExternalSymbol Symbols for data and code that reside outside the BinaryView
+ =========================== ==============================================================
+ """
+[docs] def __init__(self, sym_type, addr, short_name, full_name=None, raw_name=None, handle=None, binding=None, namespace=None):
+ if handle is not None:
+ self.handle = core.handle_of_type(handle, core.BNSymbol)
+ else:
+ if isinstance(sym_type, str):
+ sym_type = SymbolType[sym_type]
+ if full_name is None:
+ full_name = short_name
+ if raw_name is None:
+ raw_name = full_name
+ if binding is None:
+ binding = SymbolBinding.NoBinding
+ if isinstance(namespace, str):
+ namespace = NameSpace(namespace)
+ if isinstance(namespace, NameSpace):
+ namespace = namespace._get_core_struct()
+ self.handle = core.BNCreateSymbol(sym_type, short_name, full_name, raw_name, addr, binding, namespace)
+
+ def __del__(self):
+ core.BNFreeSymbol(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, Symbol):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Symbol):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def type(self):
+ """Symbol type (read-only)"""
+ return SymbolType(core.BNGetSymbolType(self.handle))
+
+ @property
+ def binding(self):
+ """Symbol binding (read-only)"""
+ return SymbolBinding(core.BNGetSymbolBinding(self.handle))
+
+ @property
+ def namespace(self):
+ """Symbol namespace (read-only)"""
+ ns = core.BNGetSymbolNameSpace(self.handle)
+ result = NameSpace._from_core_struct(ns)
+ core.BNFreeNameSpace(ns)
+ return result
+
+ @property
+ def name(self):
+ """Symbol name (read-only)"""
+ return core.BNGetSymbolRawName(self.handle)
+
+ @property
+ def short_name(self):
+ """Symbol short name (read-only)"""
+ return core.BNGetSymbolShortName(self.handle)
+
+ @property
+ def full_name(self):
+ """Symbol full name (read-only)"""
+ return core.BNGetSymbolFullName(self.handle)
+
+ @property
+ def raw_name(self):
+ """Symbol raw name (read-only)"""
+ return core.BNGetSymbolRawName(self.handle)
+
+ @property
+ def address(self):
+ """Symbol address (read-only)"""
+ return core.BNGetSymbolAddress(self.handle)
+
+ @property
+ def auto(self):
+ return core.BNIsSymbolAutoDefined(self.handle)
+
+ def __repr__(self):
+ return "<%s: \"%s\" @ %#x>" % (self.type, self.full_name, self.address)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+
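+# Illustrative sketch (added to this listing): constructing a standalone
+# Symbol of one of the types from the table above. Attaching it to a
+# BinaryView (e.g. via define_user_symbol) is part of the wider API and is
+# only assumed here.
+_example_sym = Symbol(SymbolType.FunctionSymbol, 0x401000, "main")
+assert _example_sym.address == 0x401000
+# full_name and raw_name default to the short name; binding defaults to NoBinding
+print(repr(_example_sym))
+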
+[docs]class FunctionParameter(object):
+[docs] def __init__(self, param_type, name = "", location = None):
+ self.type = param_type
+ self.name = name
+ self.location = location
+
+ def __repr__(self):
+ if (self.location is not None) and (self.location.name != self.name):
+ return "%s %s%s @ %s" % (self.type.get_string_before_name(), self.name, self.type.get_string_after_name(), self.location.name)
+ return "%s %s%s" % (self.type.get_string_before_name(), self.name, self.type.get_string_after_name())
+
+
+[docs]class Type(object):
+[docs] def __init__(self, handle, platform = None, confidence = max_confidence):
+ self.handle = handle
+ self.confidence = confidence
+ self.platform = platform
+
+ def __del__(self):
+ core.BNFreeType(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, Type):
+ return False
+ return core.BNTypesEqual(self.handle, value.handle)
+
+ def __ne__(self, value):
+ if not isinstance(value, Type):
+ return True
+ return core.BNTypesNotEqual(self.handle, value.handle)
+
+ @property
+ def type_class(self):
+ """Type class (read-only)"""
+ return TypeClass(core.BNGetTypeClass(self.handle))
+
+ @property
+ def width(self):
+ """Type width (read-only)"""
+ return core.BNGetTypeWidth(self.handle)
+
+ @property
+ def alignment(self):
+ """Type alignment (read-only)"""
+ return core.BNGetTypeAlignment(self.handle)
+
+ @property
+ def signed(self):
+		"""Whether type is signed (read-only)"""
+ result = core.BNIsTypeSigned(self.handle)
+ return BoolWithConfidence(result.value, confidence = result.confidence)
+
+ @property
+ def const(self):
+ """Whether type is const (read/write)"""
+ result = core.BNIsTypeConst(self.handle)
+ return BoolWithConfidence(result.value, confidence = result.confidence)
+
+ @const.setter
+ def const(self, value):
+ bc = core.BNBoolWithConfidence()
+ bc.value = bool(value)
+ if hasattr(value, 'confidence'):
+ bc.confidence = value.confidence
+ else:
+ bc.confidence = max_confidence
+ core.BNTypeSetConst(self.handle, bc)
+
+ @property
+ def modified(self):
+ """Whether type is modified (read-only)"""
+ return core.BNIsTypeFloatingPoint(self.handle)
+
+ @property
+ def target(self):
+ """Target (read-only)"""
+ result = core.BNGetChildType(self.handle)
+ if not result.type:
+ return None
+ return Type(result.type, platform = self.platform, confidence = result.confidence)
+
+ @property
+ def element_type(self):
+		"""Element type (read-only)"""
+ result = core.BNGetChildType(self.handle)
+ if not result.type:
+ return None
+ return Type(result.type, platform = self.platform, confidence = result.confidence)
+
+ @property
+ def return_value(self):
+ """Return value (read-only)"""
+ result = core.BNGetChildType(self.handle)
+ if not result.type:
+ return None
+ return Type(result.type, platform = self.platform, confidence = result.confidence)
+
+ @property
+ def calling_convention(self):
+ """Calling convention (read-only)"""
+ result = core.BNGetTypeCallingConvention(self.handle)
+ if not result.convention:
+ return None
+ return binaryninja.callingconvention.CallingConvention(None, handle = result.convention, confidence = result.confidence)
+
+ @property
+ def parameters(self):
+ """Type parameters list (read-only)"""
+ count = ctypes.c_ulonglong()
+ params = core.BNGetTypeParameters(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ param_type = Type(core.BNNewTypeReference(params[i].type), platform = self.platform, confidence = params[i].typeConfidence)
+ if params[i].defaultLocation:
+ param_location = None
+ else:
+ name = params[i].name
+ if (params[i].location.type == VariableSourceType.RegisterVariableSourceType) and (self.platform is not None):
+ name = self.platform.arch.get_reg_name(params[i].location.storage)
+ elif params[i].location.type == VariableSourceType.StackVariableSourceType:
+ name = "arg_%x" % params[i].location.storage
+ param_location = binaryninja.function.Variable(None, params[i].location.type, params[i].location.index,
+ params[i].location.storage, name, param_type)
+ result.append(FunctionParameter(param_type, params[i].name, param_location))
+ core.BNFreeTypeParameterList(params, count.value)
+ return result
+
+ @property
+ def has_variable_arguments(self):
+ """Whether type has variable arguments (read-only)"""
+ result = core.BNTypeHasVariableArguments(self.handle)
+ return BoolWithConfidence(result.value, confidence = result.confidence)
+
+ @property
+ def can_return(self):
+ """Whether type can return (read-only)"""
+ result = core.BNFunctionTypeCanReturn(self.handle)
+ return BoolWithConfidence(result.value, confidence = result.confidence)
+
+ @property
+ def structure(self):
+ """Structure of the type (read-only)"""
+ result = core.BNGetTypeStructure(self.handle)
+ if result is None:
+ return None
+ return Structure(result)
+
+ @property
+ def enumeration(self):
+ """Type enumeration (read-only)"""
+ result = core.BNGetTypeEnumeration(self.handle)
+ if result is None:
+ return None
+ return Enumeration(result)
+
+ @property
+ def named_type_reference(self):
+ """Reference to a named type (read-only)"""
+ result = core.BNGetTypeNamedTypeReference(self.handle)
+ if result is None:
+ return None
+ return NamedTypeReference(handle = result)
+
+ @property
+ def count(self):
+ """Type count (read-only)"""
+ return core.BNGetTypeElementCount(self.handle)
+
+ @property
+ def offset(self):
+ """Offset into structure (read-only)"""
+ return core.BNGetTypeOffset(self.handle)
+
+ @property
+ def stack_adjustment(self):
+ """Stack adjustment for function (read-only)"""
+ result = core.BNGetTypeStackAdjustment(self.handle)
+ return SizeWithConfidence(result.value, confidence = result.confidence)
+
+ def __len__(self):
+ return self.width
+
+ def __str__(self):
+ platform = None
+ if self.platform is not None:
+ platform = self.platform.handle
+ return core.BNGetTypeString(self.handle, platform)
+
+ def __repr__(self):
+ if self.confidence < max_confidence:
+ return "<type: %s, %d%% confidence>" % (str(self), (self.confidence * 100) // max_confidence)
+ return "<type: %s>" % str(self)
+
+[docs] def get_string_before_name(self):
+ platform = None
+ if self.platform is not None:
+ platform = self.platform.handle
+ return core.BNGetTypeStringBeforeName(self.handle, platform)
+
+[docs] def get_string_after_name(self):
+ platform = None
+ if self.platform is not None:
+ platform = self.platform.handle
+ return core.BNGetTypeStringAfterName(self.handle, platform)
+
+ @property
+ def tokens(self):
+ """Type string as a list of tokens (read-only)"""
+ return self.get_tokens()
+
+[docs] def get_tokens(self, base_confidence = max_confidence):
+ count = ctypes.c_ulonglong()
+ platform = None
+ if self.platform is not None:
+ platform = self.platform.handle
+ tokens = core.BNGetTypeTokens(self.handle, platform, base_confidence, count)
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result
+
+[docs] def get_tokens_before_name(self, base_confidence = max_confidence):
+ count = ctypes.c_ulonglong()
+ platform = None
+ if self.platform is not None:
+ platform = self.platform.handle
+ tokens = core.BNGetTypeTokensBeforeName(self.handle, platform, base_confidence, count)
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result
+
+[docs] def get_tokens_after_name(self, base_confidence = max_confidence):
+ count = ctypes.c_ulonglong()
+ platform = None
+ if self.platform is not None:
+ platform = self.platform.handle
+ tokens = core.BNGetTypeTokensAfterName(self.handle, platform, base_confidence, count)
+ result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
+ core.BNFreeInstructionText(tokens, count.value)
+ return result
+
+
+
+
+
+
+
+[docs] @classmethod
+ def int(self, width, sign = None, altname=""):
+ """
+ ``int`` class method for creating an int Type.
+
+ :param int width: width of the integer in bytes
+		:param bool sign: optional flag indicating whether the integer is signed
+ :param string altname: alternate name for type
+ """
+ if sign is None:
+ sign = BoolWithConfidence(True, confidence = 0)
+ elif not isinstance(sign, BoolWithConfidence):
+ sign = BoolWithConfidence(sign)
+
+ sign_conf = core.BNBoolWithConfidence()
+ sign_conf.value = sign.value
+ sign_conf.confidence = sign.confidence
+
+ return Type(core.BNCreateIntegerType(width, sign_conf, altname))
+
+[docs] @classmethod
+ def float(self, width, altname=""):
+ """
+		``float`` class method for creating a floating point Type.
+
+ :param int width: width of the floating point number in bytes
+ :param string altname: alternate name for type
+ """
+ return Type(core.BNCreateFloatType(width, altname))
+
+[docs] @classmethod
+ def structure_type(self, structure_type):
+ return Type(core.BNCreateStructureType(structure_type.handle))
+
+[docs] @classmethod
+ def named_type(self, named_type, width = 0, align = 1):
+ return Type(core.BNCreateNamedTypeReference(named_type.handle, width, align))
+
+[docs] @classmethod
+ def named_type_from_type_and_id(self, type_id, name, t):
+ name = QualifiedName(name)._get_core_struct()
+ if t is not None:
+ t = t.handle
+ return Type(core.BNCreateNamedTypeReferenceFromTypeAndId(type_id, name, t))
+
+[docs] @classmethod
+ def named_type_from_type(self, name, t):
+ name = QualifiedName(name)._get_core_struct()
+ if t is not None:
+ t = t.handle
+ return Type(core.BNCreateNamedTypeReferenceFromTypeAndId("", name, t))
+
+[docs] @classmethod
+ def named_type_from_registered_type(self, view, name):
+ name = QualifiedName(name)._get_core_struct()
+ return Type(core.BNCreateNamedTypeReferenceFromType(view.handle, name))
+
+[docs] @classmethod
+ def enumeration_type(self, arch, e, width=None, sign=False):
+ if width is None:
+ width = arch.default_int_size
+ return Type(core.BNCreateEnumerationType(arch.handle, e.handle, width, sign))
+
+[docs] @classmethod
+ def pointer(self, arch, t, const=None, volatile=None, ref_type=None):
+ if const is None:
+ const = BoolWithConfidence(False, confidence = 0)
+ elif not isinstance(const, BoolWithConfidence):
+ const = BoolWithConfidence(const)
+
+ if volatile is None:
+ volatile = BoolWithConfidence(False, confidence = 0)
+ elif not isinstance(volatile, BoolWithConfidence):
+ volatile = BoolWithConfidence(volatile)
+
+ if ref_type is None:
+ ref_type = ReferenceType.PointerReferenceType
+
+ type_conf = core.BNTypeWithConfidence()
+ type_conf.type = t.handle
+ type_conf.confidence = t.confidence
+
+ const_conf = core.BNBoolWithConfidence()
+ const_conf.value = const.value
+ const_conf.confidence = const.confidence
+
+ volatile_conf = core.BNBoolWithConfidence()
+ volatile_conf.value = volatile.value
+ volatile_conf.confidence = volatile.confidence
+
+ return Type(core.BNCreatePointerType(arch.handle, type_conf, const_conf, volatile_conf, ref_type))
+
+[docs] @classmethod
+ def array(self, t, count):
+ type_conf = core.BNTypeWithConfidence()
+ type_conf.type = t.handle
+ type_conf.confidence = t.confidence
+ return Type(core.BNCreateArrayType(type_conf, count))
+
+[docs] @classmethod
+ def function(self, ret, params, calling_convention=None, variable_arguments=None, stack_adjust=None):
+ """
+		``function`` class method for creating a function Type.
+
+		:param Type ret: return Type of the function
+ :param list(Type) params: list of parameter Types
+ :param CallingConvention calling_convention: optional argument for function calling convention
+ :param bool variable_arguments: optional argument for functions that have a variable number of arguments
+ """
+ param_buf = (core.BNFunctionParameter * len(params))()
+ for i in range(0, len(params)):
+ if isinstance(params[i], Type):
+ param_buf[i].name = ""
+ param_buf[i].type = params[i].handle
+ param_buf[i].typeConfidence = params[i].confidence
+ param_buf[i].defaultLocation = True
+ elif isinstance(params[i], FunctionParameter):
+ param_buf[i].name = params[i].name
+ param_buf[i].type = params[i].type.handle
+ param_buf[i].typeConfidence = params[i].type.confidence
+ if params[i].location is None:
+ param_buf[i].defaultLocation = True
+ else:
+ param_buf[i].defaultLocation = False
+ param_buf[i].location.type = params[i].location.type
+ param_buf[i].location.index = params[i].location.index
+ param_buf[i].location.storage = params[i].location.storage
+ else:
+ param_buf[i].name = params[i][1]
+ param_buf[i].type = params[i][0].handle
+ param_buf[i].typeConfidence = params[i][0].confidence
+ param_buf[i].defaultLocation = True
+
+ ret_conf = core.BNTypeWithConfidence()
+ ret_conf.type = ret.handle
+ ret_conf.confidence = ret.confidence
+
+ conv_conf = core.BNCallingConventionWithConfidence()
+ if calling_convention is None:
+ conv_conf.convention = None
+ conv_conf.confidence = 0
+ else:
+ conv_conf.convention = calling_convention.handle
+ conv_conf.confidence = calling_convention.confidence
+
+ if variable_arguments is None:
+ variable_arguments = BoolWithConfidence(False, confidence = 0)
+ elif not isinstance(variable_arguments, BoolWithConfidence):
+ variable_arguments = BoolWithConfidence(variable_arguments)
+
+ vararg_conf = core.BNBoolWithConfidence()
+ vararg_conf.value = variable_arguments.value
+ vararg_conf.confidence = variable_arguments.confidence
+
+ if stack_adjust is None:
+ stack_adjust = SizeWithConfidence(0, confidence = 0)
+ elif not isinstance(stack_adjust, SizeWithConfidence):
+ stack_adjust = SizeWithConfidence(stack_adjust)
+
+ stack_adjust_conf = core.BNOffsetWithConfidence()
+ stack_adjust_conf.value = stack_adjust.value
+ stack_adjust_conf.confidence = stack_adjust.confidence
+
+ return Type(core.BNCreateFunctionType(ret_conf, conv_conf, param_buf, len(params),
+ vararg_conf, stack_adjust_conf))
+
+[docs] @classmethod
+ def generate_auto_type_id(self, source, name):
+ name = QualifiedName(name)._get_core_struct()
+ return core.BNGenerateAutoTypeId(source, name)
+
+[docs] @classmethod
+ def generate_auto_demangled_type_id(self, name):
+ name = QualifiedName(name)._get_core_struct()
+ return core.BNGenerateAutoDemangledTypeId(name)
+
+[docs] @classmethod
+ def get_auto_demangled_type_id_source(self):
+ return core.BNGetAutoDemangledTypeIdSource()
+
+[docs] def with_confidence(self, confidence):
+ return Type(handle = core.BNNewTypeReference(self.handle), platform = self.platform, confidence = confidence)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
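+# Illustrative sketch (added to this listing): composing Types from the class
+# methods above. Only constructors defined in this module are used; attaching
+# the results to a BinaryView is assumed to happen elsewhere in the API.
+_int32 = Type.int(4, sign = True)
+_int32_array = Type.array(_int32, 16)
+_func = Type.function(_int32, [(_int32, "a"), FunctionParameter(_int32, "b")])
+print(str(_func))  # C-style rendering of the function type via BNGetTypeString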
+
+[docs]class BoolWithConfidence(object):
+[docs] def __init__(self, value, confidence = max_confidence):
+ self.value = value
+ self.confidence = confidence
+
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return repr(self.value)
+
+ def __bool__(self):
+ return self.value
+
+ def __nonzero__(self):
+ return self.value
+
+
+[docs]class SizeWithConfidence(object):
+[docs] def __init__(self, value, confidence = max_confidence):
+ self.value = value
+ self.confidence = confidence
+
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return repr(self.value)
+
+ def __int__(self):
+ return self.value
+
+
+[docs]class RegisterStackAdjustmentWithConfidence(object):
+[docs] def __init__(self, value, confidence = max_confidence):
+ self.value = value
+ self.confidence = confidence
+
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return repr(self.value)
+
+ def __int__(self):
+ return self.value
+
+
+[docs]class RegisterSet(object):
+[docs] def __init__(self, reg_list, confidence = max_confidence):
+ self.regs = reg_list
+ self.confidence = confidence
+
+ def __repr__(self):
+ return repr(self.regs)
+
+ def __iter__(self):
+ for reg in self.regs:
+ yield reg
+
+ def __getitem__(self, idx):
+ return self.regs[idx]
+
+ def __len__(self):
+ return len(self.regs)
+
+[docs] def with_confidence(self, confidence):
+ return RegisterSet(list(self.regs), confidence = confidence)
+
+
+[docs]class ReferenceTypeWithConfidence(object):
+[docs] def __init__(self, value, confidence = max_confidence):
+ self.value = value
+ self.confidence = confidence
+
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return repr(self.value)
+
+
+[docs]class NamedTypeReference(object):
+[docs] def __init__(self, type_class = NamedTypeReferenceClass.UnknownNamedTypeClass, type_id = None, name = None, handle = None):
+ if handle is None:
+ self.handle = core.BNCreateNamedType()
+ core.BNSetTypeReferenceClass(self.handle, type_class)
+ if type_id is not None:
+ core.BNSetTypeReferenceId(self.handle, type_id)
+ if name is not None:
+ name = QualifiedName(name)._get_core_struct()
+ core.BNSetTypeReferenceName(self.handle, name)
+ else:
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeNamedTypeReference(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, NamedTypeReference):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, NamedTypeReference):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def type_class(self):
+ return NamedTypeReferenceClass(core.BNGetTypeReferenceClass(self.handle))
+
+ @type_class.setter
+ def type_class(self, value):
+ core.BNSetTypeReferenceClass(self.handle, value)
+
+ @property
+ def type_id(self):
+ return core.BNGetTypeReferenceId(self.handle)
+
+ @type_id.setter
+ def type_id(self, value):
+ core.BNSetTypeReferenceId(self.handle, value)
+
+ @property
+ def name(self):
+ name = core.BNGetTypeReferenceName(self.handle)
+ result = QualifiedName._from_core_struct(name)
+ core.BNFreeQualifiedName(name)
+ return result
+
+ @name.setter
+ def name(self, value):
+ value = QualifiedName(value)._get_core_struct()
+ core.BNSetTypeReferenceName(self.handle, value)
+
+ def __repr__(self):
+ if self.type_class == NamedTypeReferenceClass.TypedefNamedTypeClass:
+ return "<named type: typedef %s>" % str(self.name)
+ if self.type_class == NamedTypeReferenceClass.StructNamedTypeClass:
+ return "<named type: struct %s>" % str(self.name)
+ if self.type_class == NamedTypeReferenceClass.UnionNamedTypeClass:
+ return "<named type: union %s>" % str(self.name)
+ if self.type_class == NamedTypeReferenceClass.EnumNamedTypeClass:
+ return "<named type: enum %s>" % str(self.name)
+ return "<named type: unknown %s>" % str(self.name)
+
+[docs] @classmethod
+ def generate_auto_type_ref(self, type_class, source, name):
+ type_id = Type.generate_auto_type_id(source, name)
+ return NamedTypeReference(type_class, type_id, name)
+
+[docs] @classmethod
+ def generate_auto_demangled_type_ref(self, type_class, name):
+ type_id = Type.generate_auto_demangled_type_id(name)
+ return NamedTypeReference(type_class, type_id, name)
+
+
+[docs]class StructureMember(object):
+[docs] def __init__(self, t, name, offset):
+ self.type = t
+ self.name = name
+ self.offset = offset
+
+ def __repr__(self):
+ if len(self.name) == 0:
+ return "<member: %s, offset %#x>" % (str(self.type), self.offset)
+ return "<%s %s%s, offset %#x>" % (self.type.get_string_before_name(), self.name,
+ self.type.get_string_after_name(), self.offset)
+
+
+[docs]class Structure(object):
+[docs] def __init__(self, handle=None):
+ if handle is None:
+ self.handle = core.BNCreateStructure()
+ else:
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeStructure(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, Structure):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Structure):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def members(self):
+ """Structure member list (read-only)"""
+ count = ctypes.c_ulonglong()
+ members = core.BNGetStructureMembers(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(StructureMember(Type(core.BNNewTypeReference(members[i].type), confidence = members[i].typeConfidence),
+ members[i].name, members[i].offset))
+ core.BNFreeStructureMemberList(members, count.value)
+ return result
+
+ @property
+ def width(self):
+ """Structure width"""
+ return core.BNGetStructureWidth(self.handle)
+
+ @width.setter
+ def width(self, new_width):
+ core.BNSetStructureWidth(self.handle, new_width)
+
+ @property
+ def alignment(self):
+ """Structure alignment"""
+ return core.BNGetStructureAlignment(self.handle)
+
+ @alignment.setter
+ def alignment(self, align):
+ core.BNSetStructureAlignment(self.handle, align)
+
+ @property
+ def packed(self):
+ return core.BNIsStructurePacked(self.handle)
+
+ @packed.setter
+ def packed(self, value):
+ core.BNSetStructurePacked(self.handle, value)
+
+ @property
+ def union(self):
+ return core.BNIsStructureUnion(self.handle)
+
+ @property
+ def type(self):
+ return StructureType(core.BNGetStructureType(self.handle))
+
+ @type.setter
+ def type(self, value):
+ core.BNSetStructureType(self.handle, value)
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ return "<struct: size %#x>" % self.width
+
+[docs] def append(self, t, name = ""):
+ tc = core.BNTypeWithConfidence()
+ tc.type = t.handle
+ tc.confidence = t.confidence
+ core.BNAddStructureMember(self.handle, tc, name)
+
+[docs] def insert(self, offset, t, name = ""):
+ tc = core.BNTypeWithConfidence()
+ tc.type = t.handle
+ tc.confidence = t.confidence
+ core.BNAddStructureMemberAtOffset(self.handle, tc, name, offset)
+
+
+
+[docs] def replace(self, i, t, name = ""):
+ tc = core.BNTypeWithConfidence()
+ tc.type = t.handle
+ tc.confidence = t.confidence
+ core.BNReplaceStructureMember(self.handle, i, tc, name)
+
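+# Illustrative sketch (added to this listing): building a structure and
+# wrapping it in a Type with Type.structure_type defined above.
+_point = Structure()
+_point.append(Type.int(4, sign = True), "x")
+_point.append(Type.int(4, sign = True), "y")
+_point_type = Type.structure_type(_point)
+print("%#x %r" % (_point.width, [m.name for m in _point.members]))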
+
+[docs]class EnumerationMember(object):
+[docs] def __init__(self, name, value, default):
+ self.name = name
+ self.value = value
+ self.default = default
+
+ def __repr__(self):
+ return "<%s = %#x>" % (self.name, self.value)
+
+
+[docs]class Enumeration(object):
+[docs] def __init__(self, handle=None):
+ if handle is None:
+ self.handle = core.BNCreateEnumeration()
+ else:
+ self.handle = handle
+
+ def __del__(self):
+ core.BNFreeEnumeration(self.handle)
+
+ def __eq__(self, value):
+ if not isinstance(value, Enumeration):
+ return False
+ return ctypes.addressof(self.handle.contents) == ctypes.addressof(value.handle.contents)
+
+ def __ne__(self, value):
+ if not isinstance(value, Enumeration):
+ return True
+ return ctypes.addressof(self.handle.contents) != ctypes.addressof(value.handle.contents)
+
+ @property
+ def members(self):
+ """Enumeration member list (read-only)"""
+ count = ctypes.c_ulonglong()
+ members = core.BNGetEnumerationMembers(self.handle, count)
+ result = []
+ for i in range(0, count.value):
+ result.append(EnumerationMember(members[i].name, members[i].value, members[i].isDefault))
+ core.BNFreeEnumerationMemberList(members, count.value)
+ return result
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ return "<enum: %s>" % repr(self.members)
+
+[docs] def append(self, name, value = None):
+ if value is None:
+ core.BNAddEnumerationMember(self.handle, name)
+ else:
+ core.BNAddEnumerationMemberWithValue(self.handle, name, value)
+
+
+
+[docs] def replace(self, i, name, value):
+ core.BNReplaceEnumerationMember(self.handle, i, name, value)
+
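+# Illustrative sketch (added to this listing): building an enumeration. The
+# Architecture lookup needed by Type.enumeration_type lives outside this
+# module and is only assumed here, so it is left commented out.
+_colors = Enumeration()
+_colors.append("RED")       # value assigned automatically
+_colors.append("GREEN", 5)  # explicit value
+# _color_type = Type.enumeration_type(binaryninja.Architecture["x86_64"], _colors)
+print(repr(_colors))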
+
+[docs]class TypeParserResult(object):
+[docs] def __init__(self, types, variables, functions):
+ self.types = types
+ self.variables = variables
+ self.functions = functions
+
+ def __repr__(self):
+ return "<types: %s, variables: %s, functions: %s>" % (self.types, self.variables, self.functions)
+
+
+[docs]def preprocess_source(source, filename=None, include_dirs=[]):
+ """
+	``preprocess_source`` runs the C preprocessor on the given source or source filename.
+
+ :param str source: source to pre-process
+ :param str filename: optional filename to pre-process
+ :param list(str) include_dirs: list of string directories to use as include directories.
+ :return: returns a tuple of (preprocessed_source, error_string)
+ :rtype: tuple(str,str)
+ :Example:
+
+ >>> source = "#define TEN 10\\nint x[TEN];\\n"
+ >>> preprocess_source(source)
+ ('#line 1 "input"\\n\\n#line 2 "input"\\n int x [ 10 ] ;\\n', '')
+ >>>
+ """
+ if filename is None:
+ filename = "input"
+ dir_buf = (ctypes.c_char_p * len(include_dirs))()
+ for i in range(0, len(include_dirs)):
+ dir_buf[i] = include_dirs[i].encode('charmap')
+ output = ctypes.c_char_p()
+ errors = ctypes.c_char_p()
+ result = core.BNPreprocessSource(source, filename, output, errors, dir_buf, len(include_dirs))
+ output_str = output.value
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(output, ctypes.POINTER(ctypes.c_byte)))
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ if result:
+ return (output_str, error_str)
+ return (None, error_str)
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+import json
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja import log  # used by the exception handlers below
+from binaryninja.enums import ActionType
+import binaryninja
+
+
+[docs]class UndoAction(object):
+ name = None
+ action_type = None
+ _registered = False
+ _registered_cb = None
+
+[docs] def __init__(self, view):
+ self._cb = core.BNUndoAction()
+ if not self.__class__._registered:
+ raise TypeError("undo action type not registered")
+ action_type = self.__class__.action_type
+ if isinstance(action_type, str):
+ self._cb.type = ActionType[action_type]
+ else:
+ self._cb.type = action_type
+ self._cb.context = 0
+ self._cb.undo = self._cb.undo.__class__(self._undo)
+ self._cb.redo = self._cb.redo.__class__(self._redo)
+ self._cb.serialize = self._cb.serialize.__class__(self._serialize)
+ self.view = view
+
+[docs] @classmethod
+ def register(cls):
+ binaryninja._init_plugins()
+ if cls.name is None:
+ raise ValueError("undo action 'name' not defined")
+ if cls.action_type is None:
+ raise ValueError("undo action 'action_type' not defined")
+ cb_type = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(core.BNUndoAction))
+ cls._registered_cb = cb_type(cls._deserialize)
+ core.BNRegisterUndoActionType(cls.name, 0, cls._registered_cb)
+ cls._registered = True
+
+ @classmethod
+ def _deserialize(cls, ctxt, data, result):
+ try:
+ action = cls.deserialize(json.loads(data))
+ if action is None:
+ return False
+ result.context = action._cb.context
+ result.undo = action._cb.undo
+ result.redo = action._cb.redo
+ result.serialize = action._cb.serialize
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _undo(self, ctxt, view):
+ try:
+ self.undo()
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _redo(self, ctxt, view):
+ try:
+ self.redo()
+ except:
+ log.log_error(traceback.format_exc())
+ return False
+
+ def _serialize(self, ctxt):
+ try:
+ return json.dumps(self.serialize())
+ except:
+ log.log_error(traceback.format_exc())
+ return "null"
+
+# Copyright (c) 2015-2019 Vector 35 Inc
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import traceback
+import ctypes
+
+# Binary Ninja components
+from binaryninja import _binaryninjacore as core
+from binaryninja import log  # used by the exception handlers below
+
+import binaryninja
+from binaryninja.enums import UpdateResult
+
+# 2-3 compatibility
+from binaryninja import range
+from binaryninja import with_metaclass
+
+
+class _UpdateChannelMetaClass(type):
+ @property
+ def list(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ errors = ctypes.c_char_p()
+ channels = core.BNGetUpdateChannels(count, errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ result = []
+ for i in range(0, count.value):
+ result.append(UpdateChannel(channels[i].name, channels[i].description, channels[i].latestVersion))
+ core.BNFreeUpdateChannelList(channels, count.value)
+ return result
+
+ @property
+ def active(self):
+ return core.BNGetActiveUpdateChannel()
+
+ @active.setter
+ def active(self, value):
+ return core.BNSetActiveUpdateChannel(value)
+
+ def __iter__(self):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ errors = ctypes.c_char_p()
+ channels = core.BNGetUpdateChannels(count, errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ try:
+ for i in range(0, count.value):
+ yield UpdateChannel(channels[i].name, channels[i].description, channels[i].latestVersion)
+ finally:
+ core.BNFreeUpdateChannelList(channels, count.value)
+
+ def __setattr__(self, name, value):
+ try:
+ type.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __getitem__(cls, name):
+ binaryninja._init_plugins()
+ count = ctypes.c_ulonglong()
+ errors = ctypes.c_char_p()
+ channels = core.BNGetUpdateChannels(count, errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ result = None
+ for i in range(0, count.value):
+ if channels[i].name == str(name):
+ result = UpdateChannel(channels[i].name, channels[i].description, channels[i].latestVersion)
+ break
+ core.BNFreeUpdateChannelList(channels, count.value)
+ if result is None:
+ raise KeyError("'%s' is not a valid channel" % str(name))
+ return result
+
+
+[docs]class UpdateProgressCallback(object):
+[docs] def __init__(self, func):
+ self.cb = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p, ctypes.c_ulonglong, ctypes.c_ulonglong)(self.callback)
+ self.func = func
+
+[docs] def callback(self, ctxt, progress, total):
+ try:
+ if self.func is not None:
+ return self.func(progress, total)
+ return True
+ except:
+ log.log_error(traceback.format_exc())
+
+
+[docs]class UpdateChannel(with_metaclass(_UpdateChannelMetaClass, object)):
+[docs] def __init__(self, name, desc, ver):
+ self.name = name
+ self.description = desc
+ self.latest_version_num = ver
+
+ @property
+ def versions(self):
+ """List of versions (read-only)"""
+ count = ctypes.c_ulonglong()
+ errors = ctypes.c_char_p()
+ versions = core.BNGetUpdateChannelVersions(self.name, count, errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ result = []
+ for i in range(0, count.value):
+ result.append(UpdateVersion(self, versions[i].version, versions[i].notes, versions[i].time))
+ core.BNFreeUpdateChannelVersionList(versions, count.value)
+ return result
+
+ @property
+ def latest_version(self):
+ """Latest version (read-only)"""
+ count = ctypes.c_ulonglong()
+ errors = ctypes.c_char_p()
+ versions = core.BNGetUpdateChannelVersions(self.name, count, errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ result = None
+ for i in range(0, count.value):
+ if versions[i].version == self.latest_version_num:
+ result = UpdateVersion(self, versions[i].version, versions[i].notes, versions[i].time)
+ break
+ core.BNFreeUpdateChannelVersionList(versions, count.value)
+ return result
+
+ @property
+ def updates_available(self):
+ """Whether updates are available (read-only)"""
+ errors = ctypes.c_char_p()
+ result = core.BNAreUpdatesAvailable(self.name, None, None, errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ return result
+
+ def __setattr__(self, name, value):
+ try:
+ object.__setattr__(self, name, value)
+ except AttributeError:
+ raise AttributeError("attribute '%s' is read only" % name)
+
+ def __repr__(self):
+ return "<channel: %s>" % self.name
+
+ def __str__(self):
+ return self.name
+
+[docs] def update_to_latest(self, progress = None):
+ cb = UpdateProgressCallback(progress)
+ errors = ctypes.c_char_p()
+ result = core.BNUpdateToLatestVersion(self.name, errors, cb.cb, None)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ return UpdateResult(result)
+
+
+[docs]class UpdateVersion(object):
+[docs] def __init__(self, channel, ver, notes, t):
+ self.channel = channel
+ self.version = ver
+ self.notes = notes
+ self.time = t
+
+ def __repr__(self):
+ return "<version: %s>" % self.version
+
+ def __str__(self):
+ return self.version
+
+[docs] def update(self, progress = None):
+ cb = UpdateProgressCallback(progress)
+ errors = ctypes.c_char_p()
+ result = core.BNUpdateToVersion(self.channel.name, self.version, errors, cb.cb, None)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+ return UpdateResult(result)
+
+
+[docs]def are_auto_updates_enabled():
+ """
+ ``are_auto_updates_enabled`` queries if auto updates are enabled.
+
+ :return: boolean True if auto updates are enabled. False if they are disabled.
+ :rtype: bool
+ """
+ return core.BNAreAutoUpdatesEnabled()
+
+
+[docs]def set_auto_updates_enabled(enabled):
+ """
+ ``set_auto_updates_enabled`` sets auto update enabled status.
+
+	:param bool enabled: True to enable updates, False to disable updates.
+ :rtype: None
+ """
+ core.BNSetAutoUpdatesEnabled(enabled)
+
+
+[docs]def get_time_since_last_update_check():
+ """
+ ``get_time_since_last_update_check`` returns the time stamp for the last time updates were checked.
+
+	:return: time stamp for the last update check
+ :rtype: int
+ """
+ return core.BNGetTimeSinceLastUpdateCheck()
+
+
+[docs]def is_update_installation_pending():
+ """
+	``is_update_installation_pending`` queries whether an update has been downloaded and is waiting to be installed.
+
+	:return: boolean True if an update is pending, False if no update is pending
+ :rtype: bool
+ """
+ return core.BNIsUpdateInstallationPending()
+
+
+[docs]def install_pending_update():
+ """
+ ``install_pending_update`` installs any pending updates
+
+ :rtype: None
+ """
+ errors = ctypes.c_char_p()
+ core.BNInstallPendingUpdate(errors)
+ if errors:
+ error_str = errors.value
+ core.BNFreeString(ctypes.cast(errors, ctypes.POINTER(ctypes.c_byte)))
+ raise IOError(error_str)
+
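+# Illustrative sketch (added to this listing): checking the active channel for
+# updates and applying them. Network access and a licensed install are assumed.
+def _example_check_for_updates(progress = None):
+	channel = UpdateChannel[UpdateChannel.active]
+	if channel.updates_available:
+		print(repr(channel.update_to_latest(progress = progress)))  # returns an UpdateResult
+	if is_update_installation_pending():
+		install_pending_update()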
+
+
+
+'''This module implements specialized container datatypes providing
+alternatives to Python's general purpose built-in containers, dict,
+list, set, and tuple.
+
+* namedtuple factory function for creating tuple subclasses with named fields
+* deque list-like container with fast appends and pops on either end
+* Counter dict subclass for counting hashable objects
+* OrderedDict dict subclass that remembers the order entries were added
+* defaultdict dict subclass that calls a factory function to supply missing values
+
+'''
+
+__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
+# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
+# They should however be considered an integral part of collections.py.
+from _abcoll import *
+import _abcoll
+__all__ += _abcoll.__all__
+
+from _collections import deque, defaultdict
+from operator import itemgetter as _itemgetter, eq as _eq
+from keyword import iskeyword as _iskeyword
+import sys as _sys
+import heapq as _heapq
+from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
+from itertools import imap as _imap
+
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+
+################################################################################
+### OrderedDict
+################################################################################
+
+[docs]class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as regular dictionaries.
+
+ # The internal self.__map dict maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+[docs] def __init__(*args, **kwds):
+ '''Initialize an ordered dictionary. The signature is the same as
+ regular dictionaries, but keyword arguments are not recommended because
+ their insertion order is arbitrary.
+
+ '''
+ if not args:
+ raise TypeError("descriptor '__init__' of 'OrderedDict' object "
+ "needs an argument")
+ self = args[0]
+ args = args[1:]
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link at the end of the linked list,
+ # and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ return dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which gets
+ # removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, _ = self.__map.pop(key)
+ link_prev[1] = link_next # update link_prev[NEXT]
+ link_next[0] = link_prev # update link_next[PREV]
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ # Traverse the linked list in order.
+ root = self.__root
+ curr = root[1] # start at the first node
+ while curr is not root:
+ yield curr[2] # yield the curr[KEY]
+ curr = curr[1] # move to next node
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ # Traverse the linked list in reverse order.
+ root = self.__root
+ curr = root[0] # start at the last node
+ while curr is not root:
+ yield curr[2] # yield the curr[KEY]
+ curr = curr[0] # move to previous node
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ dict.clear(self)
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems -> an iterator over the (key, value) pairs in od'
+ for k in self:
+ yield (k, self[k])
+
+ update = MutableMapping.update
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
+ value. If key is not found, d is returned if given, otherwise KeyError
+ is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ key = next(reversed(self) if last else iter(self))
+ value = self.pop(key)
+ return key, value
+
+ def __repr__(self, _repr_running={}):
+ 'od.__repr__() <==> repr(od)'
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
+ If not specified, the value defaults to None.
+
+ '''
+ self = cls()
+ for key in iterable:
+ self[key] = value
+ return self
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return dict.__eq__(self, other) and all(_imap(_eq, self, other))
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ 'od.__ne__(y) <==> od!=y'
+ return not self == other
+
+ # -- the following methods support python 3.x style dictionary views --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
+
+
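+# Illustrative usage sketch (added for documentation, not part of the original
+# backport): shows the ordering guarantees of the OrderedDict defined above.
+def _ordereddict_example():
+    od = OrderedDict()
+    od['banana'] = 3
+    od['apple'] = 1
+    od['pear'] = 2
+    assert od.keys() == ['banana', 'apple', 'pear']    # insertion order is kept
+    assert od.popitem() == ('pear', 2)                 # LIFO by default
+    assert od.popitem(last=False) == ('banana', 3)     # FIFO when last is False
+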
+################################################################################
+### namedtuple
+################################################################################
+
+_class_template = '''\
+class {typename}(tuple):
+ '{typename}({arg_list})'
+
+ __slots__ = ()
+
+ _fields = {field_names!r}
+
+ def __new__(_cls, {arg_list}):
+ 'Create new instance of {typename}({arg_list})'
+ return _tuple.__new__(_cls, ({arg_list}))
+
+ @classmethod
+ def _make(cls, iterable, new=tuple.__new__, len=len):
+ 'Make a new {typename} object from a sequence or iterable'
+ result = new(cls, iterable)
+ if len(result) != {num_fields:d}:
+ raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
+ return result
+
+ def __repr__(self):
+ 'Return a nicely formatted representation string'
+ return '{typename}({repr_fmt})' % self
+
+ def _asdict(self):
+ 'Return a new OrderedDict which maps field names to their values'
+ return OrderedDict(zip(self._fields, self))
+
+ def _replace(_self, **kwds):
+ 'Return a new {typename} object replacing specified fields with new values'
+ result = _self._make(map(kwds.pop, {field_names!r}, _self))
+ if kwds:
+ raise ValueError('Got unexpected field names: %r' % kwds.keys())
+ return result
+
+ def __getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return tuple(self)
+
+ __dict__ = _property(_asdict)
+
+ def __getstate__(self):
+ 'Exclude the OrderedDict from pickling'
+ pass
+
+{field_defs}
+'''
+
+_repr_template = '{name}=%r'
+
+_field_template = '''\
+ {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
+'''
+
+def namedtuple(typename, field_names, verbose=False, rename=False):
+ """Returns a new subclass of tuple with named fields.
+
+ >>> Point = namedtuple('Point', ['x', 'y'])
+ >>> Point.__doc__ # docstring for the new class
+ 'Point(x, y)'
+ >>> p = Point(11, y=22) # instantiate with positional args or keywords
+ >>> p[0] + p[1] # indexable like a plain tuple
+ 33
+ >>> x, y = p # unpack like a regular tuple
+ >>> x, y
+ (11, 22)
+ >>> p.x + p.y # fields also accessible by name
+ 33
+ >>> d = p._asdict() # convert to a dictionary
+ >>> d['x']
+ 11
+ >>> Point(**d) # convert from a dictionary
+ Point(x=11, y=22)
+ >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
+ Point(x=100, y=22)
+
+ """
+
+ # Validate the field names. At the user's option, either generate an error
+ # message or automatically replace the field name with a valid name.
+ if isinstance(field_names, basestring):
+ field_names = field_names.replace(',', ' ').split()
+ field_names = map(str, field_names)
+ typename = str(typename)
+ if rename:
+ seen = set()
+ for index, name in enumerate(field_names):
+ if (not all(c.isalnum() or c=='_' for c in name)
+ or _iskeyword(name)
+ or not name
+ or name[0].isdigit()
+ or name.startswith('_')
+ or name in seen):
+ field_names[index] = '_%d' % index
+ seen.add(name)
+ for name in [typename] + field_names:
+ if type(name) != str:
+ raise TypeError('Type names and field names must be strings')
+ if not all(c.isalnum() or c=='_' for c in name):
+ raise ValueError('Type names and field names can only contain '
+ 'alphanumeric characters and underscores: %r' % name)
+ if _iskeyword(name):
+ raise ValueError('Type names and field names cannot be a '
+ 'keyword: %r' % name)
+ if name[0].isdigit():
+ raise ValueError('Type names and field names cannot start with '
+ 'a number: %r' % name)
+ seen = set()
+ for name in field_names:
+ if name.startswith('_') and not rename:
+ raise ValueError('Field names cannot start with an underscore: '
+ '%r' % name)
+ if name in seen:
+ raise ValueError('Encountered duplicate field name: %r' % name)
+ seen.add(name)
+
+ # Fill-in the class template
+ class_definition = _class_template.format(
+ typename = typename,
+ field_names = tuple(field_names),
+ num_fields = len(field_names),
+ arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
+ repr_fmt = ', '.join(_repr_template.format(name=name)
+ for name in field_names),
+ field_defs = '\n'.join(_field_template.format(index=index, name=name)
+ for index, name in enumerate(field_names))
+ )
+ if verbose:
+ print class_definition
+
+ # Execute the template string in a temporary namespace and support
+ # tracing utilities by setting a value for frame.f_globals['__name__']
+ namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+ OrderedDict=OrderedDict, _property=property, _tuple=tuple)
+ try:
+ exec class_definition in namespace
+ except SyntaxError as e:
+ raise SyntaxError(e.message + ':\n' + class_definition)
+ result = namespace[typename]
+
+ # For pickling to work, the __module__ variable needs to be set to the frame
+ # where the named tuple is created. Bypass this step in environments where
+ # sys._getframe is not defined (Jython for example) or sys._getframe is not
+ # defined for arguments greater than 0 (IronPython).
+ try:
+ result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return result
+
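+# Illustrative sketch (added for documentation, not part of the original
+# module): with rename=True, invalid or duplicate field names are silently
+# replaced by positional names instead of raising ValueError.
+def _namedtuple_rename_example():
+    Row = namedtuple('Row', ['id', 'class', 'id'], rename=True)
+    assert Row._fields == ('id', '_1', '_2')   # 'class' is a keyword, 'id' repeats
+    assert Row(1, 2, 3)._asdict() == OrderedDict([('id', 1), ('_1', 2), ('_2', 3)])
+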
+
+########################################################################
+### Counter
+########################################################################
+
+class Counter(dict):
+ '''Dict subclass for counting hashable items. Sometimes called a bag
+ or multiset. Elements are stored as dictionary keys and their counts
+ are stored as dictionary values.
+
+ >>> c = Counter('abcdeabcdabcaba') # count elements from a string
+
+ >>> c.most_common(3) # three most common elements
+ [('a', 5), ('b', 4), ('c', 3)]
+ >>> sorted(c) # list all unique elements
+ ['a', 'b', 'c', 'd', 'e']
+ >>> ''.join(sorted(c.elements())) # list elements with repetitions
+ 'aaaaabbbbcccdde'
+ >>> sum(c.values()) # total of all counts
+ 15
+
+ >>> c['a'] # count of letter 'a'
+ 5
+ >>> for elem in 'shazam': # update counts from an iterable
+ ... c[elem] += 1 # by adding 1 to each element's count
+ >>> c['a'] # now there are seven 'a'
+ 7
+ >>> del c['b'] # remove all 'b'
+ >>> c['b'] # now there are zero 'b'
+ 0
+
+ >>> d = Counter('simsalabim') # make another counter
+ >>> c.update(d) # add in the second counter
+ >>> c['a'] # now there are nine 'a'
+ 9
+
+ >>> c.clear() # empty the counter
+ >>> c
+ Counter()
+
+ Note: If a count is set to zero or reduced to zero, it will remain
+ in the counter until the entry is deleted or the counter is cleared:
+
+ >>> c = Counter('aaabbc')
+ >>> c['b'] -= 2 # reduce the count of 'b' by two
+ >>> c.most_common() # 'b' is still in, but its count is zero
+ [('a', 3), ('c', 1), ('b', 0)]
+
+ '''
+ # References:
+ # http://en.wikipedia.org/wiki/Multiset
+ # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
+ # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
+ # http://code.activestate.com/recipes/259174/
+ # Knuth, TAOCP Vol. II section 4.6.3
+
+ def __init__(*args, **kwds):
+        '''Create a new, empty Counter object. If given, count elements from
+        an input iterable, or initialize the counts from another mapping of
+        elements to their counts.
+
+ >>> c = Counter() # a new, empty counter
+ >>> c = Counter('gallahad') # a new counter from an iterable
+ >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
+ >>> c = Counter(a=4, b=2) # a new counter from keyword args
+
+ '''
+ if not args:
+ raise TypeError("descriptor '__init__' of 'Counter' object "
+ "needs an argument")
+ self = args[0]
+ args = args[1:]
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ super(Counter, self).__init__()
+ self.update(*args, **kwds)
+
+ def __missing__(self, key):
+ 'The count of elements not in the Counter is zero.'
+ # Needed so that self[missing_item] does not raise KeyError
+ return 0
+
+ def most_common(self, n=None):
+ '''List the n most common elements and their counts from the most
+ common to the least. If n is None, then list all element counts.
+
+ >>> Counter('abcdeabcdabcaba').most_common(3)
+ [('a', 5), ('b', 4), ('c', 3)]
+
+ '''
+ # Emulate Bag.sortedByCount from Smalltalk
+ if n is None:
+ return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
+ return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
+
+ def elements(self):
+ '''Iterator over elements repeating each as many times as its count.
+
+ >>> c = Counter('ABCABC')
+ >>> sorted(c.elements())
+ ['A', 'A', 'B', 'B', 'C', 'C']
+
+ # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
+ >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
+ >>> product = 1
+ >>> for factor in prime_factors.elements(): # loop over factors
+ ... product *= factor # and multiply them
+ >>> product
+ 1836
+
+ Note, if an element's count has been set to zero or is a negative
+ number, elements() will ignore it.
+
+ '''
+ # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
+ return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
+
+ # Override dict methods where necessary
+
+ @classmethod
+ def fromkeys(cls, iterable, v=None):
+ # There is no equivalent method for counters because setting v=1
+ # means that no element can have a count greater than one.
+ raise NotImplementedError(
+ 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
+
+ def update(*args, **kwds):
+        '''Like dict.update() but adds counts instead of replacing them.
+
+ Source can be an iterable, a dictionary, or another Counter instance.
+
+ >>> c = Counter('which')
+ >>> c.update('witch') # add elements from another iterable
+ >>> d = Counter('watch')
+ >>> c.update(d) # add elements from another counter
+ >>> c['h'] # four 'h' in which, witch, and watch
+ 4
+
+ '''
+ # The regular dict.update() operation makes no sense here because the
+        # replace behavior results in some of the original untouched counts
+        # being mixed in with all of the other counts, a mishmash that
+        # doesn't have a straightforward interpretation in most counting
+ # contexts. Instead, we implement straight-addition. Both the inputs
+ # and outputs are allowed to contain zero and negative counts.
+
+ if not args:
+ raise TypeError("descriptor 'update' of 'Counter' object "
+ "needs an argument")
+ self = args[0]
+ args = args[1:]
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ iterable = args[0] if args else None
+ if iterable is not None:
+ if isinstance(iterable, Mapping):
+ if self:
+ self_get = self.get
+ for elem, count in iterable.iteritems():
+ self[elem] = self_get(elem, 0) + count
+ else:
+ super(Counter, self).update(iterable) # fast path when counter is empty
+ else:
+ self_get = self.get
+ for elem in iterable:
+ self[elem] = self_get(elem, 0) + 1
+ if kwds:
+ self.update(kwds)
+
+ def subtract(*args, **kwds):
+ '''Like dict.update() but subtracts counts instead of replacing them.
+ Counts can be reduced below zero. Both the inputs and outputs are
+ allowed to contain zero and negative counts.
+
+ Source can be an iterable, a dictionary, or another Counter instance.
+
+ >>> c = Counter('which')
+ >>> c.subtract('witch') # subtract elements from another iterable
+ >>> c.subtract(Counter('watch')) # subtract elements from another counter
+ >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
+ 0
+ >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
+ -1
+
+ '''
+ if not args:
+ raise TypeError("descriptor 'subtract' of 'Counter' object "
+ "needs an argument")
+ self = args[0]
+ args = args[1:]
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ iterable = args[0] if args else None
+ if iterable is not None:
+ self_get = self.get
+ if isinstance(iterable, Mapping):
+ for elem, count in iterable.items():
+ self[elem] = self_get(elem, 0) - count
+ else:
+ for elem in iterable:
+ self[elem] = self_get(elem, 0) - 1
+ if kwds:
+ self.subtract(kwds)
+
+ def copy(self):
+ 'Return a shallow copy.'
+ return self.__class__(self)
+
+ def __reduce__(self):
+ return self.__class__, (dict(self),)
+
+ def __delitem__(self, elem):
+ 'Like dict.__delitem__() but does not raise KeyError for missing values.'
+ if elem in self:
+ super(Counter, self).__delitem__(elem)
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % self.__class__.__name__
+ items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
+ return '%s({%s})' % (self.__class__.__name__, items)
+
+ # Multiset-style mathematical operations discussed in:
+ # Knuth TAOCP Volume II section 4.6.3 exercise 19
+ # and at http://en.wikipedia.org/wiki/Multiset
+ #
+ # Outputs guaranteed to only include positive counts.
+ #
+ # To strip negative and zero counts, add-in an empty counter:
+ # c += Counter()
+
+ def __add__(self, other):
+ '''Add counts from two counters.
+
+ >>> Counter('abbb') + Counter('bcc')
+ Counter({'b': 4, 'c': 2, 'a': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem, count in self.items():
+ newcount = count + other[elem]
+ if newcount > 0:
+ result[elem] = newcount
+ for elem, count in other.items():
+ if elem not in self and count > 0:
+ result[elem] = count
+ return result
+
+ def __sub__(self, other):
+ ''' Subtract count, but keep only results with positive counts.
+
+ >>> Counter('abbbc') - Counter('bccd')
+ Counter({'b': 2, 'a': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem, count in self.items():
+ newcount = count - other[elem]
+ if newcount > 0:
+ result[elem] = newcount
+ for elem, count in other.items():
+ if elem not in self and count < 0:
+ result[elem] = 0 - count
+ return result
+
+ def __or__(self, other):
+ '''Union is the maximum of value in either of the input counters.
+
+ >>> Counter('abbb') | Counter('bcc')
+ Counter({'b': 3, 'c': 2, 'a': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem, count in self.items():
+ other_count = other[elem]
+ newcount = other_count if count < other_count else count
+ if newcount > 0:
+ result[elem] = newcount
+ for elem, count in other.items():
+ if elem not in self and count > 0:
+ result[elem] = count
+ return result
+
+ def __and__(self, other):
+ ''' Intersection is the minimum of corresponding counts.
+
+ >>> Counter('abbb') & Counter('bcc')
+ Counter({'b': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem, count in self.items():
+ other_count = other[elem]
+ newcount = count if count < other_count else other_count
+ if newcount > 0:
+ result[elem] = newcount
+ return result
+
+
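+# Illustrative sketch (added for documentation, not part of the original
+# module): adding an empty Counter strips zero and negative counts, as noted
+# in the comments above, while subtract() keeps them.
+def _counter_cleanup_example():
+    c = Counter(a=2, b=0)
+    c.subtract(a=1, c=3)                       # counts may go to zero or below
+    assert c == {'a': 1, 'b': 0, 'c': -3}
+    c += Counter()                             # uses __add__, drops counts <= 0
+    assert c == Counter({'a': 1})
+
+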
+if __name__ == '__main__':
+ # verify that instances can be pickled
+ from cPickle import loads, dumps
+ Point = namedtuple('Point', 'x, y', True)
+ p = Point(x=10, y=20)
+ assert p == loads(dumps(p))
+
+ # test and demonstrate ability to override methods
+ class Point(namedtuple('Point', 'x y')):
+ __slots__ = ()
+ @property
+ def hypot(self):
+ return (self.x ** 2 + self.y ** 2) ** 0.5
+ def __str__(self):
+ return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
+
+ for p in Point(3, 4), Point(14, 5/7.):
+ print p
+
+ class Point(namedtuple('Point', 'x y')):
+ 'Point class with optimized _make() and _replace() without error-checking'
+ __slots__ = ()
+ _make = classmethod(tuple.__new__)
+ def _replace(self, _map=map, **kwds):
+ return self._make(_map(kwds.get, ('x', 'y'), self))
+
+ print Point(11, 22)._replace(x=100)
+
+ Point3D = namedtuple('Point3D', Point._fields + ('z',))
+ print Point3D.__doc__
+
+ import doctest
+ TestResults = namedtuple('TestResults', 'failed attempted')
+ print TestResults(*doctest.testmod())
+
+"""An extensible library for opening URLs using a variety of protocols
+
+The simplest way to use this module is to call the urlopen function,
+which accepts a string containing a URL or a Request object (described
+below). It opens the URL and returns the results as file-like
+object; the returned object has some extra methods described below.
+
+The OpenerDirector manages a collection of Handler objects that do
+all the actual work. Each Handler implements a particular protocol or
+option. The OpenerDirector is a composite object that invokes the
+Handlers needed to open the requested URL. For example, the
+HTTPHandler performs HTTP GET and POST requests and deals with
+non-error returns. The HTTPRedirectHandler automatically deals with
+HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
+deals with digest authentication.
+
+urlopen(url, data=None) -- Basic usage is the same as original
+urllib. Pass the URL and optionally the data to post to an HTTP URL, and
+get a file-like object back. One difference is that you can also pass
+a Request instance instead of a URL. Raises a URLError (subclass of
+IOError); for HTTP errors, raises an HTTPError, which can also be
+treated as a valid response.
+
+build_opener -- Function that creates a new OpenerDirector instance.
+Will install the default handlers. Accepts one or more Handlers as
+arguments, either instances or Handler classes that it will
+instantiate. If one of the arguments is a subclass of a default
+handler, the argument will be installed instead of the default.
+
+install_opener -- Installs a new opener as the default opener.
+
+objects of interest:
+
+OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
+the Handler classes, while dealing with requests and responses.
+
+Request -- An object that encapsulates the state of a request. The
+state can be as simple as the URL. It can also include extra HTTP
+headers, e.g. a User-Agent.
+
+BaseHandler --
+
+exceptions:
+URLError -- A subclass of IOError, individual protocols have their own
+specific subclass.
+
+HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
+as an exceptional event or valid response.
+
+internals:
+BaseHandler and parent
+_call_chain conventions
+
+Example usage:
+
+import urllib2
+
+# set up authentication info
+authinfo = urllib2.HTTPBasicAuthHandler()
+authinfo.add_password(realm='PDQ Application',
+ uri='https://mahler:8092/site-updates.py',
+ user='klem',
+ passwd='geheim$parole')
+
+proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
+
+# build a new opener that adds authentication and caching FTP handlers
+opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
+
+# install it
+urllib2.install_opener(opener)
+
+f = urllib2.urlopen('http://www.python.org/')
+
+
+"""
+
+# XXX issues:
+# If an authentication error handler that tries to perform
+# authentication for some reason but fails, how should the error be
+# signalled? The client needs to know the HTTP error code. But if
+# the handler knows that the problem was, e.g., that it didn't know
+# the hash algorithm requested in the challenge, it would be good to
+# pass that information along to the client, too.
+# ftp errors aren't handled cleanly
+# check digest against correct (i.e. non-apache) implementation
+
+# Possible extensions:
+# complex proxies XXX not sure what exactly was meant by this
+# abstract factory for opener
+
+import base64
+import hashlib
+import httplib
+import mimetools
+import os
+import posixpath
+import random
+import re
+import socket
+import sys
+import time
+import urlparse
+import bisect
+import warnings
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+# check for SSL
+try:
+ import ssl
+except ImportError:
+ _have_ssl = False
+else:
+ _have_ssl = True
+
+from urllib import (unwrap, unquote, splittype, splithost, quote,
+ addinfourl, splitport, splittag, toBytes,
+ splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
+
+# support for FileHandler, proxies via environment variables
+from urllib import localhost, url2pathname, getproxies, proxy_bypass
+
+# used in User-Agent header sent
+__version__ = sys.version[:3]
+
+_opener = None
+[docs]def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ cafile=None, capath=None, cadefault=False, context=None):
+ global _opener
+ if cafile or capath or cadefault:
+ if context is not None:
+ raise ValueError(
+ "You can't pass both context and any of cafile, capath, and "
+ "cadefault"
+ )
+ if not _have_ssl:
+ raise ValueError('SSL support not available')
+ context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
+ cafile=cafile,
+ capath=capath)
+ https_handler = HTTPSHandler(context=context)
+ opener = build_opener(https_handler)
+ elif context:
+ https_handler = HTTPSHandler(context=context)
+ opener = build_opener(https_handler)
+ elif _opener is None:
+ _opener = opener = build_opener()
+ else:
+ opener = _opener
+ return opener.open(url, data, timeout)
+
+
+
+# do these error classes make sense?
+# make sure all of the IOError stuff is overridden. we just want to be
+# subtypes.
+
+[docs]class URLError(IOError):
+ # URLError is a sub-type of IOError, but it doesn't share any of
+ # the implementation. need to override __init__ and __str__.
+ # It sets self.args for compatibility with other EnvironmentError
+ # subclasses, but args doesn't have the typical format with errno in
+ # slot 0 and strerror in slot 1. This may be better than nothing.
+ def __init__(self, reason):
+ self.args = reason,
+ self.reason = reason
+
+ def __str__(self):
+ return '<urlopen error %s>' % self.reason
+
+class HTTPError(URLError, addinfourl):
+    """Raised when an HTTP error occurs, but also acts like a non-error return"""
+ __super_init = addinfourl.__init__
+
+ def __init__(self, url, code, msg, hdrs, fp):
+ self.code = code
+ self.msg = msg
+ self.hdrs = hdrs
+ self.fp = fp
+ self.filename = url
+ # The addinfourl classes depend on fp being a valid file
+ # object. In some cases, the HTTPError may not have a valid
+ # file object. If this happens, the simplest workaround is to
+ # not initialize the base classes.
+ if fp is not None:
+ self.__super_init(fp, hdrs, url, code)
+
+ def __str__(self):
+ return 'HTTP Error %s: %s' % (self.code, self.msg)
+
+ # since URLError specifies a .reason attribute, HTTPError should also
+    # provide this attribute. See issue13211 for discussion.
+ @property
+ def reason(self):
+ return self.msg
+
+ def info(self):
+ return self.hdrs
+
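+# Illustrative sketch (added for documentation, not part of the original
+# module): because HTTPError doubles as a response object, callers can either
+# let it propagate or read the error body. The URL is only a placeholder.
+def _fetch_with_error_handling(url='http://www.example.com/missing'):
+    try:
+        return 200, urlopen(url).read()
+    except HTTPError, e:
+        return e.code, e.read()        # e behaves like the file-like response
+    except URLError, e:
+        return None, str(e.reason)     # e.g. DNS failure, connection refused
+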
+# copied from cookielib.py
+_cut_port_re = re.compile(r":\d+$")
+def request_host(request):
+ """Return request-host, as defined by RFC 2965.
+
+ Variation from RFC: returned value is lowercased, for convenient
+ comparison.
+
+ """
+ url = request.get_full_url()
+ host = urlparse.urlparse(url)[1]
+ if host == "":
+ host = request.get_header("Host", "")
+
+ # remove port, if present
+ host = _cut_port_re.sub("", host, 1)
+ return host.lower()
+
+class Request:
+
+ def __init__(self, url, data=None, headers={},
+ origin_req_host=None, unverifiable=False):
+ # unwrap('<URL:type://host/path>') --> 'type://host/path'
+ self.__original = unwrap(url)
+ self.__original, self.__fragment = splittag(self.__original)
+ self.type = None
+ # self.__r_type is what's left after doing the splittype
+ self.host = None
+ self.port = None
+ self._tunnel_host = None
+ self.data = data
+ self.headers = {}
+ for key, value in headers.items():
+ self.add_header(key, value)
+ self.unredirected_hdrs = {}
+ if origin_req_host is None:
+ origin_req_host = request_host(self)
+ self.origin_req_host = origin_req_host
+ self.unverifiable = unverifiable
+
+ def __getattr__(self, attr):
+ # XXX this is a fallback mechanism to guard against these
+ # methods getting called in a non-standard order. this may be
+ # too complicated and/or unnecessary.
+ # XXX should the __r_XXX attributes be public?
+ if attr in ('_Request__r_type', '_Request__r_host'):
+ getattr(self, 'get_' + attr[12:])()
+ return self.__dict__[attr]
+ raise AttributeError, attr
+
+ def get_method(self):
+ if self.has_data():
+ return "POST"
+ else:
+ return "GET"
+
+ # XXX these helper methods are lame
+
+ def add_data(self, data):
+ self.data = data
+
+ def has_data(self):
+ return self.data is not None
+
+ def get_data(self):
+ return self.data
+
+ def get_full_url(self):
+ if self.__fragment:
+ return '%s#%s' % (self.__original, self.__fragment)
+ else:
+ return self.__original
+
+ def get_type(self):
+ if self.type is None:
+ self.type, self.__r_type = splittype(self.__original)
+ if self.type is None:
+ raise ValueError, "unknown url type: %s" % self.__original
+ return self.type
+
+ def get_host(self):
+ if self.host is None:
+ self.host, self.__r_host = splithost(self.__r_type)
+ if self.host:
+ self.host = unquote(self.host)
+ return self.host
+
+ def get_selector(self):
+ return self.__r_host
+
+ def set_proxy(self, host, type):
+ if self.type == 'https' and not self._tunnel_host:
+ self._tunnel_host = self.host
+ else:
+ self.type = type
+ self.__r_host = self.__original
+
+ self.host = host
+
+ def has_proxy(self):
+ return self.__r_host == self.__original
+
+ def get_origin_req_host(self):
+ return self.origin_req_host
+
+ def is_unverifiable(self):
+ return self.unverifiable
+
+ def add_header(self, key, val):
+ # useful for something like authentication
+ self.headers[key.capitalize()] = val
+
+ def add_unredirected_header(self, key, val):
+ # will not be added to a redirected request
+ self.unredirected_hdrs[key.capitalize()] = val
+
+ def has_header(self, header_name):
+ return (header_name in self.headers or
+ header_name in self.unredirected_hdrs)
+
+ def get_header(self, header_name, default=None):
+ return self.headers.get(
+ header_name,
+ self.unredirected_hdrs.get(header_name, default))
+
+ def header_items(self):
+ hdrs = self.unredirected_hdrs.copy()
+ hdrs.update(self.headers)
+ return hdrs.items()
+
+class OpenerDirector:
+ def __init__(self):
+ client_version = "Python-urllib/%s" % __version__
+ self.addheaders = [('User-agent', client_version)]
+ # self.handlers is retained only for backward compatibility
+ self.handlers = []
+ # manage the individual handlers
+ self.handle_open = {}
+ self.handle_error = {}
+ self.process_response = {}
+ self.process_request = {}
+
+ def add_handler(self, handler):
+ if not hasattr(handler, "add_parent"):
+ raise TypeError("expected BaseHandler instance, got %r" %
+ type(handler))
+
+ added = False
+ for meth in dir(handler):
+ if meth in ["redirect_request", "do_open", "proxy_open"]:
+ # oops, coincidental match
+ continue
+
+ i = meth.find("_")
+ protocol = meth[:i]
+ condition = meth[i+1:]
+
+ if condition.startswith("error"):
+ j = condition.find("_") + i + 1
+ kind = meth[j+1:]
+ try:
+ kind = int(kind)
+ except ValueError:
+ pass
+ lookup = self.handle_error.get(protocol, {})
+ self.handle_error[protocol] = lookup
+ elif condition == "open":
+ kind = protocol
+ lookup = self.handle_open
+ elif condition == "response":
+ kind = protocol
+ lookup = self.process_response
+ elif condition == "request":
+ kind = protocol
+ lookup = self.process_request
+ else:
+ continue
+
+ handlers = lookup.setdefault(kind, [])
+ if handlers:
+ bisect.insort(handlers, handler)
+ else:
+ handlers.append(handler)
+ added = True
+
+ if added:
+ bisect.insort(self.handlers, handler)
+ handler.add_parent(self)
+
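+    # How the name-based dispatch above works (comments added for
+    # documentation): a method named "http_open" is registered in
+    # handle_open['http'], "http_error_404" ends up in handle_error['http']
+    # keyed by the integer 404, and "http_request"/"http_response" go into
+    # process_request['http'] and process_response['http'] respectively.
+    # Handlers are kept sorted by handler_order via bisect.insort, so
+    # lower-numbered handlers run first.
+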
+ def close(self):
+ # Only exists for backwards compatibility.
+ pass
+
+ def _call_chain(self, chain, kind, meth_name, *args):
+ # Handlers raise an exception if no one else should try to handle
+ # the request, or return None if they can't but another handler
+ # could. Otherwise, they return the response.
+ handlers = chain.get(kind, ())
+ for handler in handlers:
+ func = getattr(handler, meth_name)
+
+ result = func(*args)
+ if result is not None:
+ return result
+
+ def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+ # accept a URL or a Request object
+ if isinstance(fullurl, basestring):
+ req = Request(fullurl, data)
+ else:
+ req = fullurl
+ if data is not None:
+ req.add_data(data)
+
+ req.timeout = timeout
+ protocol = req.get_type()
+
+ # pre-process request
+ meth_name = protocol+"_request"
+ for processor in self.process_request.get(protocol, []):
+ meth = getattr(processor, meth_name)
+ req = meth(req)
+
+ response = self._open(req, data)
+
+ # post-process response
+ meth_name = protocol+"_response"
+ for processor in self.process_response.get(protocol, []):
+ meth = getattr(processor, meth_name)
+ response = meth(req, response)
+
+ return response
+
+ def _open(self, req, data=None):
+ result = self._call_chain(self.handle_open, 'default',
+ 'default_open', req)
+ if result:
+ return result
+
+ protocol = req.get_type()
+ result = self._call_chain(self.handle_open, protocol, protocol +
+ '_open', req)
+ if result:
+ return result
+
+ return self._call_chain(self.handle_open, 'unknown',
+ 'unknown_open', req)
+
+ def error(self, proto, *args):
+ if proto in ('http', 'https'):
+ # XXX http[s] protocols are special-cased
+            dict = self.handle_error['http'] # https is no different from http
+ proto = args[2] # YUCK!
+ meth_name = 'http_error_%s' % proto
+ http_err = 1
+ orig_args = args
+ else:
+ dict = self.handle_error
+ meth_name = proto + '_error'
+ http_err = 0
+ args = (dict, proto, meth_name) + args
+ result = self._call_chain(*args)
+ if result:
+ return result
+
+ if http_err:
+ args = (dict, 'default', 'http_error_default') + orig_args
+ return self._call_chain(*args)
+
+# XXX probably also want an abstract factory that knows when it makes
+# sense to skip a superclass in favor of a subclass and when it might
+# make sense to include both
+
+[docs]def build_opener(*handlers):
+ """Create an opener object from a list of handlers.
+
+ The opener will use several default handlers, including support
+ for HTTP, FTP and when applicable, HTTPS.
+
+ If any of the handlers passed as arguments are subclasses of the
+ default handlers, the default handlers will not be used.
+ """
+ import types
+ def isclass(obj):
+ return isinstance(obj, (types.ClassType, type))
+
+ opener = OpenerDirector()
+ default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
+ HTTPDefaultErrorHandler, HTTPRedirectHandler,
+ FTPHandler, FileHandler, HTTPErrorProcessor]
+ if hasattr(httplib, 'HTTPS'):
+ default_classes.append(HTTPSHandler)
+ skip = set()
+ for klass in default_classes:
+ for check in handlers:
+ if isclass(check):
+ if issubclass(check, klass):
+ skip.add(klass)
+ elif isinstance(check, klass):
+ skip.add(klass)
+ for klass in skip:
+ default_classes.remove(klass)
+
+ for klass in default_classes:
+ opener.add_handler(klass())
+
+ for h in handlers:
+ if isclass(h):
+ h = h()
+ opener.add_handler(h)
+ return opener
+
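+# Illustrative sketch (added for documentation, not part of the original
+# module): build_opener() accepts handler classes and instances; a configured
+# ProxyHandler instance replaces the default one built from the environment.
+# The proxy address is a placeholder.
+def _build_proxied_opener_example():
+    proxy = ProxyHandler({'http': 'http://proxy.example.com:3128'})
+    opener = build_opener(proxy, HTTPCookieProcessor)
+    install_opener(opener)             # make it the default used by urlopen()
+    return opener
+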
+class BaseHandler:
+ handler_order = 500
+
+ def add_parent(self, parent):
+ self.parent = parent
+
+ def close(self):
+ # Only exists for backwards compatibility
+ pass
+
+ def __lt__(self, other):
+ if not hasattr(other, "handler_order"):
+ # Try to preserve the old behavior of having custom classes
+ # inserted after default ones (works only for custom user
+ # classes which are not aware of handler_order).
+ return True
+ return self.handler_order < other.handler_order
+
+
+class HTTPErrorProcessor(BaseHandler):
+ """Process HTTP error responses."""
+ handler_order = 1000 # after all other processing
+
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+ https_response = http_response
+
+class HTTPDefaultErrorHandler(BaseHandler):
+ def http_error_default(self, req, fp, code, msg, hdrs):
+ raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+
+class HTTPRedirectHandler(BaseHandler):
+ # maximum number of redirections to any single URL
+ # this is needed because of the state that cookies introduce
+ max_repeats = 4
+ # maximum total number of redirections (regardless of URL) before
+ # assuming we're in a loop
+ max_redirections = 10
+
+ def redirect_request(self, req, fp, code, msg, headers, newurl):
+ """Return a Request or None in response to a redirect.
+
+ This is called by the http_error_30x methods when a
+ redirection response is received. If a redirection should
+ take place, return a new Request to allow http_error_30x to
+ perform the redirect. Otherwise, raise HTTPError if no-one
+ else should try to handle this url. Return None if you can't
+ but another Handler might.
+ """
+ m = req.get_method()
+ if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
+ or code in (301, 302, 303) and m == "POST"):
+ # Strictly (according to RFC 2616), 301 or 302 in response
+ # to a POST MUST NOT cause a redirection without confirmation
+ # from the user (of urllib2, in this case). In practice,
+ # essentially all clients do redirect in this case, so we
+ # do the same.
+            # be lenient with URIs containing a space
+ newurl = newurl.replace(' ', '%20')
+ newheaders = dict((k,v) for k,v in req.headers.items()
+ if k.lower() not in ("content-length", "content-type")
+ )
+ return Request(newurl,
+ headers=newheaders,
+ origin_req_host=req.get_origin_req_host(),
+ unverifiable=True)
+ else:
+ raise HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+ # Implementation note: To avoid the server sending us into an
+ # infinite loop, the request object needs to track what URLs we
+ # have already seen. Do this by adding a handler-specific
+ # attribute to the Request object.
+ def http_error_302(self, req, fp, code, msg, headers):
+ # Some servers (incorrectly) return multiple Location headers
+ # (so probably same goes for URI). Use first header.
+ if 'location' in headers:
+ newurl = headers.getheaders('location')[0]
+ elif 'uri' in headers:
+ newurl = headers.getheaders('uri')[0]
+ else:
+ return
+
+ # fix a possible malformed URL
+ urlparts = urlparse.urlparse(newurl)
+ if not urlparts.path and urlparts.netloc:
+ urlparts = list(urlparts)
+ urlparts[2] = "/"
+ newurl = urlparse.urlunparse(urlparts)
+
+ newurl = urlparse.urljoin(req.get_full_url(), newurl)
+
+ # For security reasons we do not allow redirects to protocols
+ # other than HTTP, HTTPS or FTP.
+ newurl_lower = newurl.lower()
+ if not (newurl_lower.startswith('http://') or
+ newurl_lower.startswith('https://') or
+ newurl_lower.startswith('ftp://')):
+ raise HTTPError(newurl, code,
+ msg + " - Redirection to url '%s' is not allowed" %
+ newurl,
+ headers, fp)
+
+ # XXX Probably want to forget about the state of the current
+ # request, although that might interact poorly with other
+ # handlers that also use handler-specific request attributes
+ new = self.redirect_request(req, fp, code, msg, headers, newurl)
+ if new is None:
+ return
+
+ # loop detection
+ # .redirect_dict has a key url if url was previously visited.
+ if hasattr(req, 'redirect_dict'):
+ visited = new.redirect_dict = req.redirect_dict
+ if (visited.get(newurl, 0) >= self.max_repeats or
+ len(visited) >= self.max_redirections):
+ raise HTTPError(req.get_full_url(), code,
+ self.inf_msg + msg, headers, fp)
+ else:
+ visited = new.redirect_dict = req.redirect_dict = {}
+ visited[newurl] = visited.get(newurl, 0) + 1
+
+ # Don't close the fp until we are sure that we won't use it
+ # with HTTPError.
+ fp.read()
+ fp.close()
+
+ return self.parent.open(new, timeout=req.timeout)
+
+ http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+ inf_msg = "The HTTP server returned a redirect error that would " \
+ "lead to an infinite loop.\n" \
+ "The last 30x error message was:\n"
+
+
+def _parse_proxy(proxy):
+ """Return (scheme, user, password, host/port) given a URL or an authority.
+
+ If a URL is supplied, it must have an authority (host:port) component.
+ According to RFC 3986, having an authority component means the URL must
+ have two slashes after the scheme:
+
+ >>> _parse_proxy('file:/ftp.example.com/')
+ Traceback (most recent call last):
+ ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
+
+ The first three items of the returned tuple may be None.
+
+ Examples of authority parsing:
+
+ >>> _parse_proxy('proxy.example.com')
+ (None, None, None, 'proxy.example.com')
+ >>> _parse_proxy('proxy.example.com:3128')
+ (None, None, None, 'proxy.example.com:3128')
+
+ The authority component may optionally include userinfo (assumed to be
+ username:password):
+
+ >>> _parse_proxy('joe:password@proxy.example.com')
+ (None, 'joe', 'password', 'proxy.example.com')
+ >>> _parse_proxy('joe:password@proxy.example.com:3128')
+ (None, 'joe', 'password', 'proxy.example.com:3128')
+
+ Same examples, but with URLs instead:
+
+ >>> _parse_proxy('http://proxy.example.com/')
+ ('http', None, None, 'proxy.example.com')
+ >>> _parse_proxy('http://proxy.example.com:3128/')
+ ('http', None, None, 'proxy.example.com:3128')
+ >>> _parse_proxy('http://joe:password@proxy.example.com/')
+ ('http', 'joe', 'password', 'proxy.example.com')
+ >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
+ ('http', 'joe', 'password', 'proxy.example.com:3128')
+
+ Everything after the authority is ignored:
+
+ >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
+ ('ftp', 'joe', 'password', 'proxy.example.com')
+
+ Test for no trailing '/' case:
+
+ >>> _parse_proxy('http://joe:password@proxy.example.com')
+ ('http', 'joe', 'password', 'proxy.example.com')
+
+ """
+ scheme, r_scheme = splittype(proxy)
+ if not r_scheme.startswith("/"):
+ # authority
+ scheme = None
+ authority = proxy
+ else:
+ # URL
+ if not r_scheme.startswith("//"):
+ raise ValueError("proxy URL with no authority: %r" % proxy)
+        # We have an authority, so for RFC 3986-compliant URLs (by
+        # ss. 3.2 and 3.3), the path is empty or starts with '/'
+ end = r_scheme.find("/", 2)
+ if end == -1:
+ end = None
+ authority = r_scheme[2:end]
+ userinfo, hostport = splituser(authority)
+ if userinfo is not None:
+ user, password = splitpasswd(userinfo)
+ else:
+ user = password = None
+ return scheme, user, password, hostport
+
+[docs]class ProxyHandler(BaseHandler):
+ # Proxies must be in front
+ handler_order = 100
+
+[docs] def __init__(self, proxies=None):
+ if proxies is None:
+ proxies = getproxies()
+ assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
+ self.proxies = proxies
+ for type, url in proxies.items():
+ setattr(self, '%s_open' % type,
+ lambda r, proxy=url, type=type, meth=self.proxy_open: \
+ meth(r, proxy, type))
+
+ def proxy_open(self, req, proxy, type):
+ orig_type = req.get_type()
+ proxy_type, user, password, hostport = _parse_proxy(proxy)
+
+ if proxy_type is None:
+ proxy_type = orig_type
+
+ if req.host and proxy_bypass(req.host):
+ return None
+
+ if user and password:
+ user_pass = '%s:%s' % (unquote(user), unquote(password))
+ creds = base64.b64encode(user_pass).strip()
+ req.add_header('Proxy-authorization', 'Basic ' + creds)
+ hostport = unquote(hostport)
+ req.set_proxy(hostport, proxy_type)
+
+ if orig_type == proxy_type or orig_type == 'https':
+ # let other handlers take care of it
+ return None
+ else:
+ # need to start over, because the other handlers don't
+ # grok the proxy's URL type
+ # e.g. if we have a constructor arg proxies like so:
+ # {'http': 'ftp://proxy.example.com'}, we may end up turning
+ # a request for http://acme.example.com/a into one for
+ # ftp://proxy.example.com/a
+ return self.parent.open(req, timeout=req.timeout)
+
+class HTTPPasswordMgr:
+
+ def __init__(self):
+ self.passwd = {}
+
+ def add_password(self, realm, uri, user, passwd):
+ # uri could be a single URI or a sequence
+ if isinstance(uri, basestring):
+ uri = [uri]
+ if not realm in self.passwd:
+ self.passwd[realm] = {}
+ for default_port in True, False:
+ reduced_uri = tuple(
+ [self.reduce_uri(u, default_port) for u in uri])
+ self.passwd[realm][reduced_uri] = (user, passwd)
+
+ def find_user_password(self, realm, authuri):
+ domains = self.passwd.get(realm, {})
+ for default_port in True, False:
+ reduced_authuri = self.reduce_uri(authuri, default_port)
+ for uris, authinfo in domains.iteritems():
+ for uri in uris:
+ if self.is_suburi(uri, reduced_authuri):
+ return authinfo
+ return None, None
+
+ def reduce_uri(self, uri, default_port=True):
+ """Accept authority or URI and extract only the authority and path."""
+ # note HTTP URLs do not have a userinfo component
+ parts = urlparse.urlsplit(uri)
+ if parts[1]:
+ # URI
+ scheme = parts[0]
+ authority = parts[1]
+ path = parts[2] or '/'
+ else:
+ # host or host:port
+ scheme = None
+ authority = uri
+ path = '/'
+ host, port = splitport(authority)
+ if default_port and port is None and scheme is not None:
+ dport = {"http": 80,
+ "https": 443,
+ }.get(scheme)
+ if dport is not None:
+ authority = "%s:%d" % (host, dport)
+ return authority, path
+
+ def is_suburi(self, base, test):
+ """Check if test is below base in a URI tree
+
+ Both args must be URIs in reduced form.
+ """
+ if base == test:
+ return True
+ if base[0] != test[0]:
+ return False
+ common = posixpath.commonprefix((base[1], test[1]))
+ if len(common) == len(base[1]):
+ return True
+ return False
+
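+# Illustrative sketch (added for documentation, not part of the original
+# module): passwords are matched by realm and URI prefix, so a credential
+# registered for a directory also applies to documents below it. The host,
+# realm and credentials are placeholders.
+def _password_mgr_example():
+    mgr = HTTPPasswordMgr()
+    mgr.add_password('Example Realm', 'http://www.example.com/private/',
+                     'klem', 'secret')
+    found = mgr.find_user_password('Example Realm',
+                                   'http://www.example.com/private/report.html')
+    assert found == ('klem', 'secret')     # URI below the registered prefix
+    missing = mgr.find_user_password('Other Realm', 'http://www.example.com/')
+    assert missing == (None, None)         # unknown realm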
+
+class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
+
+ def find_user_password(self, realm, authuri):
+ user, password = HTTPPasswordMgr.find_user_password(self, realm,
+ authuri)
+ if user is not None:
+ return user, password
+ return HTTPPasswordMgr.find_user_password(self, None, authuri)
+
+
+class AbstractBasicAuthHandler:
+
+ # XXX this allows for multiple auth-schemes, but will stupidly pick
+ # the last one with a realm specified.
+
+ # allow for double- and single-quoted realm values
+ # (single quotes are a violation of the RFC, but appear in the wild)
+ rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
+ 'realm=(["\']?)([^"\']*)\\2', re.I)
+
+ # XXX could pre-emptively send auth info already accepted (RFC 2617,
+ # end of section 2, and section 1.2 immediately after "credentials"
+ # production).
+
+ def __init__(self, password_mgr=None):
+ if password_mgr is None:
+ password_mgr = HTTPPasswordMgr()
+ self.passwd = password_mgr
+ self.add_password = self.passwd.add_password
+
+
+ def http_error_auth_reqed(self, authreq, host, req, headers):
+ # host may be an authority (without userinfo) or a URL with an
+ # authority
+ # XXX could be multiple headers
+ authreq = headers.get(authreq, None)
+
+ if authreq:
+ mo = AbstractBasicAuthHandler.rx.search(authreq)
+ if mo:
+ scheme, quote, realm = mo.groups()
+ if quote not in ['"', "'"]:
+ warnings.warn("Basic Auth Realm was unquoted",
+ UserWarning, 2)
+ if scheme.lower() == 'basic':
+ return self.retry_http_basic_auth(host, req, realm)
+
+ def retry_http_basic_auth(self, host, req, realm):
+ user, pw = self.passwd.find_user_password(realm, host)
+ if pw is not None:
+ raw = "%s:%s" % (user, pw)
+ auth = 'Basic %s' % base64.b64encode(raw).strip()
+ if req.get_header(self.auth_header, None) == auth:
+ return None
+ req.add_unredirected_header(self.auth_header, auth)
+ return self.parent.open(req, timeout=req.timeout)
+ else:
+ return None
+
+
+class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
+
+ auth_header = 'Authorization'
+
+ def http_error_401(self, req, fp, code, msg, headers):
+ url = req.get_full_url()
+ response = self.http_error_auth_reqed('www-authenticate',
+ url, req, headers)
+ return response
+
+
+class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
+
+ auth_header = 'Proxy-authorization'
+
+ def http_error_407(self, req, fp, code, msg, headers):
+ # http_error_auth_reqed requires that there is no userinfo component in
+ # authority. Assume there isn't one, since urllib2 does not (and
+ # should not, RFC 3986 s. 3.2.1) support requests for URLs containing
+ # userinfo.
+ authority = req.get_host()
+ response = self.http_error_auth_reqed('proxy-authenticate',
+ authority, req, headers)
+ return response
+
+
+def randombytes(n):
+ """Return n random bytes."""
+ # Use /dev/urandom if it is available. Fall back to random module
+ # if not. It might be worthwhile to extend this function to use
+ # other platform-specific mechanisms for getting random bytes.
+ if os.path.exists("/dev/urandom"):
+ f = open("/dev/urandom")
+ s = f.read(n)
+ f.close()
+ return s
+ else:
+ L = [chr(random.randrange(0, 256)) for i in range(n)]
+ return "".join(L)
+
+class AbstractDigestAuthHandler:
+ # Digest authentication is specified in RFC 2617.
+
+ # XXX The client does not inspect the Authentication-Info header
+ # in a successful response.
+
+ # XXX It should be possible to test this implementation against
+ # a mock server that just generates a static set of challenges.
+
+    # XXX qop="auth-int" support is shaky
+
+ def __init__(self, passwd=None):
+ if passwd is None:
+ passwd = HTTPPasswordMgr()
+ self.passwd = passwd
+ self.add_password = self.passwd.add_password
+ self.retried = 0
+ self.nonce_count = 0
+ self.last_nonce = None
+
+ def reset_retry_count(self):
+ self.retried = 0
+
+ def http_error_auth_reqed(self, auth_header, host, req, headers):
+ authreq = headers.get(auth_header, None)
+ if self.retried > 5:
+ # Don't fail endlessly - if we failed once, we'll probably
+ # fail a second time. Hm. Unless the Password Manager is
+ # prompting for the information. Crap. This isn't great
+ # but it's better than the current 'repeat until recursion
+ # depth exceeded' approach <wink>
+ raise HTTPError(req.get_full_url(), 401, "digest auth failed",
+ headers, None)
+ else:
+ self.retried += 1
+ if authreq:
+ scheme = authreq.split()[0]
+ if scheme.lower() == 'digest':
+ return self.retry_http_digest_auth(req, authreq)
+
+ def retry_http_digest_auth(self, req, auth):
+ token, challenge = auth.split(' ', 1)
+ chal = parse_keqv_list(parse_http_list(challenge))
+ auth = self.get_authorization(req, chal)
+ if auth:
+ auth_val = 'Digest %s' % auth
+ if req.headers.get(self.auth_header, None) == auth_val:
+ return None
+ req.add_unredirected_header(self.auth_header, auth_val)
+ resp = self.parent.open(req, timeout=req.timeout)
+ return resp
+
+ def get_cnonce(self, nonce):
+ # The cnonce-value is an opaque
+ # quoted string value provided by the client and used by both client
+ # and server to avoid chosen plaintext attacks, to provide mutual
+ # authentication, and to provide some message integrity protection.
+ # This isn't a fabulous effort, but it's probably Good Enough.
+ dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
+ randombytes(8))).hexdigest()
+ return dig[:16]
+
+ def get_authorization(self, req, chal):
+ try:
+ realm = chal['realm']
+ nonce = chal['nonce']
+ qop = chal.get('qop')
+ algorithm = chal.get('algorithm', 'MD5')
+ # mod_digest doesn't send an opaque, even though it isn't
+ # supposed to be optional
+ opaque = chal.get('opaque', None)
+ except KeyError:
+ return None
+
+ H, KD = self.get_algorithm_impls(algorithm)
+ if H is None:
+ return None
+
+ user, pw = self.passwd.find_user_password(realm, req.get_full_url())
+ if user is None:
+ return None
+
+ # XXX not implemented yet
+ if req.has_data():
+ entdig = self.get_entity_digest(req.get_data(), chal)
+ else:
+ entdig = None
+
+ A1 = "%s:%s:%s" % (user, realm, pw)
+ A2 = "%s:%s" % (req.get_method(),
+ # XXX selector: what about proxies and full urls
+ req.get_selector())
+ if qop == 'auth':
+ if nonce == self.last_nonce:
+ self.nonce_count += 1
+ else:
+ self.nonce_count = 1
+ self.last_nonce = nonce
+
+ ncvalue = '%08x' % self.nonce_count
+ cnonce = self.get_cnonce(nonce)
+ noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
+ respdig = KD(H(A1), noncebit)
+ elif qop is None:
+ respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
+ else:
+ # XXX handle auth-int.
+ raise URLError("qop '%s' is not supported." % qop)
+
+ # XXX should the partial digests be encoded too?
+
+ base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
+ 'response="%s"' % (user, realm, nonce, req.get_selector(),
+ respdig)
+ if opaque:
+ base += ', opaque="%s"' % opaque
+ if entdig:
+ base += ', digest="%s"' % entdig
+ base += ', algorithm="%s"' % algorithm
+ if qop:
+ base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
+ return base
+
+ def get_algorithm_impls(self, algorithm):
+ # algorithm should be case-insensitive according to RFC2617
+ algorithm = algorithm.upper()
+ # lambdas assume digest modules are imported at the top level
+ if algorithm == 'MD5':
+ H = lambda x: hashlib.md5(x).hexdigest()
+ elif algorithm == 'SHA':
+ H = lambda x: hashlib.sha1(x).hexdigest()
+ # XXX MD5-sess
+ else:
+ raise ValueError("Unsupported digest authentication "
+ "algorithm %r" % algorithm.lower())
+ KD = lambda s, d: H("%s:%s" % (s, d))
+ return H, KD
+
+ def get_entity_digest(self, data, chal):
+ # XXX not implemented yet
+ return None
+
+
+class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
+ """An authentication protocol defined by RFC 2069
+
+ Digest authentication improves on basic authentication because it
+ does not transmit passwords in the clear.
+ """
+
+ auth_header = 'Authorization'
+ handler_order = 490 # before Basic auth
+
+ def http_error_401(self, req, fp, code, msg, headers):
+ host = urlparse.urlparse(req.get_full_url())[1]
+ retry = self.http_error_auth_reqed('www-authenticate',
+ host, req, headers)
+ self.reset_retry_count()
+ return retry
+
+
+class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
+
+ auth_header = 'Proxy-Authorization'
+ handler_order = 490 # before Basic auth
+
+ def http_error_407(self, req, fp, code, msg, headers):
+ host = req.get_host()
+ retry = self.http_error_auth_reqed('proxy-authenticate',
+ host, req, headers)
+ self.reset_retry_count()
+ return retry
+
+class AbstractHTTPHandler(BaseHandler):
+
+ def __init__(self, debuglevel=0):
+ self._debuglevel = debuglevel
+
+ def set_http_debuglevel(self, level):
+ self._debuglevel = level
+
+ def do_request_(self, request):
+ host = request.get_host()
+ if not host:
+ raise URLError('no host given')
+
+ if request.has_data(): # POST
+ data = request.get_data()
+ if not request.has_header('Content-type'):
+ request.add_unredirected_header(
+ 'Content-type',
+ 'application/x-www-form-urlencoded')
+ if not request.has_header('Content-length'):
+ request.add_unredirected_header(
+ 'Content-length', '%d' % len(data))
+
+ sel_host = host
+ if request.has_proxy():
+ scheme, sel = splittype(request.get_selector())
+ sel_host, sel_path = splithost(sel)
+
+ if not request.has_header('Host'):
+ request.add_unredirected_header('Host', sel_host)
+ for name, value in self.parent.addheaders:
+ name = name.capitalize()
+ if not request.has_header(name):
+ request.add_unredirected_header(name, value)
+
+ return request
+
+ def do_open(self, http_class, req, **http_conn_args):
+ """Return an addinfourl object for the request, using http_class.
+
+ http_class must implement the HTTPConnection API from httplib.
+ The addinfourl return value is a file-like object. It also
+ has methods and attributes including:
+ - info(): return a mimetools.Message object for the headers
+ - geturl(): return the original request URL
+ - code: HTTP status code
+ """
+ host = req.get_host()
+ if not host:
+ raise URLError('no host given')
+
+ # will parse host:port
+ h = http_class(host, timeout=req.timeout, **http_conn_args)
+ h.set_debuglevel(self._debuglevel)
+
+ headers = dict(req.unredirected_hdrs)
+ headers.update(dict((k, v) for k, v in req.headers.items()
+ if k not in headers))
+
+ # We want to make an HTTP/1.1 request, but the addinfourl
+ # class isn't prepared to deal with a persistent connection.
+ # It will try to read all remaining data from the socket,
+ # which will block while the server waits for the next request.
+ # So make sure the connection gets closed after the (only)
+ # request.
+ headers["Connection"] = "close"
+ headers = dict(
+ (name.title(), val) for name, val in headers.items())
+
+ if req._tunnel_host:
+ tunnel_headers = {}
+ proxy_auth_hdr = "Proxy-Authorization"
+ if proxy_auth_hdr in headers:
+ tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
+ # Proxy-Authorization should not be sent to origin
+ # server.
+ del headers[proxy_auth_hdr]
+ h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
+
+ try:
+ h.request(req.get_method(), req.get_selector(), req.data, headers)
+ except socket.error, err: # XXX what error?
+ h.close()
+ raise URLError(err)
+ else:
+ try:
+ r = h.getresponse(buffering=True)
+ except TypeError: # buffering kw not supported
+ r = h.getresponse()
+
+ # Pick apart the HTTPResponse object to get the addinfourl
+ # object initialized properly.
+
+ # Wrap the HTTPResponse object in socket's file object adapter
+ # for Windows. That adapter calls recv(), so delegate recv()
+ # to read(). This weird wrapping allows the returned object to
+ # have readline() and readlines() methods.
+
+ # XXX It might be better to extract the read buffering code
+ # out of socket._fileobject() and into a base class.
+
+ r.recv = r.read
+ fp = socket._fileobject(r, close=True)
+
+ resp = addinfourl(fp, r.msg, req.get_full_url())
+ resp.code = r.status
+ resp.msg = r.reason
+ return resp
+
+
+class HTTPHandler(AbstractHTTPHandler):
+
+ def http_open(self, req):
+ return self.do_open(httplib.HTTPConnection, req)
+
+ http_request = AbstractHTTPHandler.do_request_
+
+if hasattr(httplib, 'HTTPS'):
+ class HTTPSHandler(AbstractHTTPHandler):
+
+ def __init__(self, debuglevel=0, context=None):
+ AbstractHTTPHandler.__init__(self, debuglevel)
+ self._context = context
+
+ def https_open(self, req):
+ return self.do_open(httplib.HTTPSConnection, req,
+ context=self._context)
+
+ https_request = AbstractHTTPHandler.do_request_
+
+class HTTPCookieProcessor(BaseHandler):
+ def __init__(self, cookiejar=None):
+ import cookielib
+ if cookiejar is None:
+ cookiejar = cookielib.CookieJar()
+ self.cookiejar = cookiejar
+
+ def http_request(self, request):
+ self.cookiejar.add_cookie_header(request)
+ return request
+
+ def http_response(self, request, response):
+ self.cookiejar.extract_cookies(response, request)
+ return response
+
+ https_request = http_request
+ https_response = http_response
+
+class UnknownHandler(BaseHandler):
+ def unknown_open(self, req):
+ type = req.get_type()
+ raise URLError('unknown url type: %s' % type)
+
+def parse_keqv_list(l):
+ """Parse list of key=value strings where keys are not duplicated."""
+ parsed = {}
+ for elt in l:
+ k, v = elt.split('=', 1)
+ if v[0] == '"' and v[-1] == '"':
+ v = v[1:-1]
+ parsed[k] = v
+ return parsed
+
+def parse_http_list(s):
+ """Parse lists as described by RFC 2068 Section 2.
+
+ In particular, parse comma-separated lists where the elements of
+ the list may include quoted-strings. A quoted-string could
+ contain a comma. A non-quoted string could have quotes in the
+ middle. Neither commas nor quotes count if they are escaped.
+ Only double-quotes count, not single-quotes.
+ """
+ res = []
+ part = ''
+
+ escape = quote = False
+ for cur in s:
+ if escape:
+ part += cur
+ escape = False
+ continue
+ if quote:
+ if cur == '\\':
+ escape = True
+ continue
+ elif cur == '"':
+ quote = False
+ part += cur
+ continue
+
+ if cur == ',':
+ res.append(part)
+ part = ''
+ continue
+
+ if cur == '"':
+ quote = True
+
+ part += cur
+
+ # append last part
+ if part:
+ res.append(part)
+
+ return [part.strip() for part in res]
+
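+# Illustrative sketch (added for documentation, not part of the original
+# module): these two helpers split a WWW-Authenticate style challenge into
+# its key=value parts while respecting commas inside quoted strings.
+def _parse_challenge_example():
+    challenge = 'realm="api@example.com", qop="auth,auth-int", nonce="abc123"'
+    items = parse_http_list(challenge)
+    assert items == ['realm="api@example.com"', 'qop="auth,auth-int"',
+                     'nonce="abc123"']
+    assert parse_keqv_list(items) == {'realm': 'api@example.com',
+                                      'qop': 'auth,auth-int',
+                                      'nonce': 'abc123'}
+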
+def _safe_gethostbyname(host):
+ try:
+ return socket.gethostbyname(host)
+ except socket.gaierror:
+ return None
+
+class FileHandler(BaseHandler):
+ # Use local file or FTP depending on form of URL
+ def file_open(self, req):
+ url = req.get_selector()
+ if url[:2] == '//' and url[2:3] != '/' and (req.host and
+ req.host != 'localhost'):
+ req.type = 'ftp'
+ return self.parent.open(req)
+ else:
+ return self.open_local_file(req)
+
+ # names for the localhost
+ names = None
+ def get_names(self):
+ if FileHandler.names is None:
+ try:
+ FileHandler.names = tuple(
+ socket.gethostbyname_ex('localhost')[2] +
+ socket.gethostbyname_ex(socket.gethostname())[2])
+ except socket.gaierror:
+ FileHandler.names = (socket.gethostbyname('localhost'),)
+ return FileHandler.names
+
+ # not entirely sure what the rules are here
+ def open_local_file(self, req):
+ import email.utils
+ import mimetypes
+ host = req.get_host()
+ filename = req.get_selector()
+ localfile = url2pathname(filename)
+ try:
+ stats = os.stat(localfile)
+ size = stats.st_size
+ modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
+ mtype = mimetypes.guess_type(filename)[0]
+ headers = mimetools.Message(StringIO(
+ 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
+ (mtype or 'text/plain', size, modified)))
+ if host:
+ host, port = splitport(host)
+ if not host or \
+ (not port and _safe_gethostbyname(host) in self.get_names()):
+ if host:
+ origurl = 'file://' + host + filename
+ else:
+ origurl = 'file://' + filename
+ return addinfourl(open(localfile, 'rb'), headers, origurl)
+ except OSError, msg:
+ # urllib2 users shouldn't expect OSErrors coming from urlopen()
+ raise URLError(msg)
+ raise URLError('file not on local host')
+
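A file:// URL with no host, or a host that resolves to one of this machine's addresses, is served directly from disk by open_local_file(), with Content-type guessed from the filename and Content-length/Last-modified taken from os.stat(); any other host is rewritten to an ftp request and re-dispatched through the parent opener. A minimal sketch on a POSIX path, assuming build_opener() is available elsewhere in this module as in urllib2:

    import os, tempfile

    fd, path = tempfile.mkstemp(suffix='.txt')
    os.write(fd, b'hello from a local file')
    os.close(fd)

    opener = build_opener(FileHandler())
    resp = opener.open('file://' + path)    # empty host -> open_local_file()
    print(resp.info()['Content-type'])      # 'text/plain', guessed from the .txt suffix
    print(resp.read())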
+class FTPHandler(BaseHandler):
+ def ftp_open(self, req):
+ import ftplib
+ import mimetypes
+ host = req.get_host()
+ if not host:
+ raise URLError('ftp error: no host given')
+ host, port = splitport(host)
+ if port is None:
+ port = ftplib.FTP_PORT
+ else:
+ port = int(port)
+
+ # username/password handling
+ user, host = splituser(host)
+ if user:
+ user, passwd = splitpasswd(user)
+ else:
+ passwd = None
+ host = unquote(host)
+ user = user or ''
+ passwd = passwd or ''
+
+ try:
+ host = socket.gethostbyname(host)
+ except socket.error, msg:
+ raise URLError(msg)
+ path, attrs = splitattr(req.get_selector())
+ dirs = path.split('/')
+ dirs = map(unquote, dirs)
+ dirs, file = dirs[:-1], dirs[-1]
+ if dirs and not dirs[0]:
+ dirs = dirs[1:]
+ try:
+ fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
+ type = file and 'I' or 'D'
+ for attr in attrs:
+ attr, value = splitvalue(attr)
+ if attr.lower() == 'type' and \
+ value in ('a', 'A', 'i', 'I', 'd', 'D'):
+ type = value.upper()
+ fp, retrlen = fw.retrfile(file, type)
+ headers = ""
+ mtype = mimetypes.guess_type(req.get_full_url())[0]
+ if mtype:
+ headers += "Content-type: %s\n" % mtype
+ if retrlen is not None and retrlen >= 0:
+ headers += "Content-length: %d\n" % retrlen
+ sf = StringIO(headers)
+ headers = mimetools.Message(sf)
+ return addinfourl(fp, headers, req.get_full_url())
+ except ftplib.all_errors, msg:
+ raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]
+
+ def connect_ftp(self, user, passwd, host, port, dirs, timeout):
+ fw = ftpwrapper(user, passwd, host, port, dirs, timeout,
+ persistent=False)
+## fw.ftp.set_debuglevel(1)
+ return fw
+
+class CacheFTPHandler(FTPHandler):
+ # XXX would be nice to have pluggable cache strategies
+ # XXX this stuff is definitely not thread safe
+ def __init__(self):
+ self.cache = {}
+ self.timeout = {}
+ self.soonest = 0
+ self.delay = 60
+ self.max_conns = 16
+
+ def setTimeout(self, t):
+ self.delay = t
+
+ def setMaxConns(self, m):
+ self.max_conns = m
+
+ def connect_ftp(self, user, passwd, host, port, dirs, timeout):
+ key = user, host, port, '/'.join(dirs), timeout
+ if key in self.cache:
+ self.timeout[key] = time.time() + self.delay
+ else:
+ self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
+ self.timeout[key] = time.time() + self.delay
+ self.check_cache()
+ return self.cache[key]
+
+ def check_cache(self):
+ # first check for old ones
+ t = time.time()
+ if self.soonest <= t:
+ for k, v in self.timeout.items():
+ if v < t:
+ self.cache[k].close()
+ del self.cache[k]
+ del self.timeout[k]
+ self.soonest = min(self.timeout.values())
+
+ # then check the size
+ if len(self.cache) == self.max_conns:
+ for k, v in self.timeout.items():
+ if v == self.soonest:
+ del self.cache[k]
+ del self.timeout[k]
+ break
+ self.soonest = min(self.timeout.values())
+
+ def clear_cache(self):
+ for conn in self.cache.values():
+ conn.close()
+ self.cache.clear()
+ self.timeout.clear()
+
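CacheFTPHandler keeps up to max_conns live ftpwrapper connections, keyed by (user, host, port, path, timeout), and closes any whose deadline expires; each reuse pushes the deadline out by another `delay` seconds, so repeated fetches from the same server share one control connection. As the comments above note, the cache is not thread safe. A usage sketch, assuming build_opener(), install_opener() and urlopen() are defined elsewhere in this module as in urllib2 (the FTP host is a placeholder):

    handler = CacheFTPHandler()
    handler.setTimeout(30)     # evict cached connections after 30 idle seconds
    handler.setMaxConns(4)     # keep at most four live FTP connections

    install_opener(build_opener(handler))
    urlopen('ftp://ftp.example.com/pub/a.txt').read()   # opens a connection
    urlopen('ftp://ftp.example.com/pub/b.txt').read()   # reuses the cached one
    handler.clear_cache()      # close everything explicitly when finished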
[Remainder of this hunk: generated Sphinx theme static assets — _static/doctools.js (search-highlight, index-toggle and keyboard-navigation helpers), _static/documentation_options.js (VERSION '1.1.1555 Personal'), theme images and the Font Awesome / Inconsolata / Lato / Roboto Slab font files, the bundled jQuery 3.2.1 library, and the Sizzle 2.3.3 selector engine — unmodified third-party build output, omitted.]
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: