-import sys, subprocess, os, re, time, getopt, shlex
+import sys, subprocess, os, re, time, getopt, shlex, xnudefines
import lldb
from functools import wraps
from ctypes import c_ulonglong as uint64_t
from utils import *
from core.lazytarget import *
-MODULE_NAME=__name__
+MODULE_NAME=__name__
""" Kernel Debugging macros for lldb.
Please make sure you read the README COMPLETELY BEFORE reading anything below.
- It is very critical that you read coding guidelines in Section E in README file.
+    It is very critical that you read the coding guidelines in Section E of the README file.
"""
+COMMON_HELP_STRING = """
+ -h Show the help string for the command.
+ -c [always|auto|never|0|1]
+ Control the colorized output of certain commands
+    -o <path/to/filename>   The output of this command execution will be saved to a file. Parser information or errors will
+                            not be sent to the file though. e.g. /tmp/output.txt
+    -s <filter_string>      The "filter_string" param is parsed as a python regex expression and each line of output
+                            will be printed/saved only if it matches the expression.
+ -v [-v...] Each additional -v will increase the verbosity of the command.
+ -p <plugin_name> Send the output of the command to plugin. Please see README for usage of plugins.
+"""
# End Utility functions
-# Debugging specific utility functions
+# Debugging specific utility functions
#decorators. Not to be called directly.
return obj
return _set_header
-# holds type declarations done by xnu.
+# holds type declarations done by xnu.
#DONOTTOUCHME: Exclusive use of lldb_type_summary only.
-lldb_summary_definitions = {}
+lldb_summary_definitions = {}
def lldb_type_summary(types_list):
- """ A function decorator to register a summary for a type in lldb.
+ """ A function decorator to register a summary for a type in lldb.
params: types_list - [] an array of types that you wish to register a summary callback function. (ex. ['task *', 'task_t'])
returns: Nothing. This is a decorator.
"""
out_string += "\n" + obj.header +"\n"
out_string += obj( core.value(lldbval) )
return out_string
-
+
myglobals = globals()
summary_function_name = "LLDBSummary" + obj.__name__
myglobals[summary_function_name] = _internal_summary_function
summary_function = myglobals[summary_function_name]
summary_function.__doc__ = obj.__doc__
-
+
global lldb_summary_definitions
for single_type in types_list:
if config['showTypeSummary']:
lldb.debugger.HandleCommand("type summary delete --category kernel \""+ single_type + "\"")
lldb.debugger.HandleCommand("type summary add \""+ single_type +"\" --category kernel --python-function " + MODULE_NAME + "." + summary_function_name)
lldb_summary_definitions[single_type] = obj
-
+
return obj
return _get_summary
-#global cache of documentation for lldb commands exported by this module
+#global cache of documentation for lldb commands exported by this module
#DONOTTOUCHME: Exclusive use of lldb_command only.
lldb_command_documentation = {}
-def lldb_command(cmd_name, option_string = ''):
+def lldb_command(cmd_name, option_string = '', fancy=False):
""" A function decorator to define a command with namd 'cmd_name' in the lldb scope to call python function.
params: cmd_name - str : name of command to be set in lldb prompt.
- option_string - str: getopt like option string. Only CAPITAL LETTER options allowed.
+ option_string - str: getopt like option string. Only CAPITAL LETTER options allowed.
see README on Customizing command options.
+ fancy - bool : whether the command will receive an 'O' object to do fancy output (tables, indent, color)
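+        ex. (illustrative; 'mycmd'/'MyCmd' are hypothetical names):
+            @lldb_command('mycmd', 'X:', fancy=True)
+            def MyCmd(cmd_args=None, cmd_options={}, O=None): ...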
"""
if option_string != option_string.upper():
raise RuntimeError("Cannot setup command with lowercase option args. %s" % option_string)
def _cmd(obj):
def _internal_command_function(debugger, command, result, internal_dict):
global config, lldb_run_command_state
- stream = CommandOutput(result)
+ stream = CommandOutput(cmd_name, result)
# need to avoid printing on stdout if called from lldb_run_command.
if 'active' in lldb_run_command_state and lldb_run_command_state['active']:
debuglog('Running %s from lldb_run_command' % command)
command_args = shlex.split(command)
lldb.debugger.HandleCommand('type category disable kernel' )
def_verbose_level = config['verbosity']
-
+
try:
stream.setOptions(command_args, option_string)
if stream.verbose_level != 0:
- config['verbosity'] += stream.verbose_level
+ config['verbosity'] += stream.verbose_level
with RedirectStdStreams(stdout=stream) :
+ args = { 'cmd_args': stream.target_cmd_args }
if option_string:
- obj(cmd_args=stream.target_cmd_args, cmd_options=stream.target_cmd_options)
- else:
- obj(cmd_args=stream.target_cmd_args)
+ args['cmd_options'] = stream.target_cmd_options
+ if fancy:
+ args['O'] = stream
+ obj(**args)
except KeyboardInterrupt:
print "Execution interrupted by user"
except ArgumentError as arg_error:
if config['showTypeSummary']:
lldb.debugger.HandleCommand('type category enable kernel' )
-
+
if stream.pluginRequired :
plugin = LoadXNUPlugin(stream.pluginName)
if plugin == None :
return_data = plugin.plugin_execute(cmd_name, result.GetOutput())
ProcessXNUPluginResult(return_data)
plugin.plugin_cleanup()
-
+
#restore the verbose level after command is complete
config['verbosity'] = def_verbose_level
-
+
return
myglobals = globals()
if not obj.__doc__ :
print "ERROR: Cannot register command({:s}) without documentation".format(cmd_name)
return obj
+ obj.__doc__ += "\n" + COMMON_HELP_STRING
command_function.__doc__ = obj.__doc__
global lldb_command_documentation
if cmd_name in lldb_command_documentation:
lldb.debugger.HandleCommand("command script delete "+cmd_name)
lldb_command_documentation[cmd_name] = (obj.__name__, obj.__doc__.lstrip(), option_string)
lldb.debugger.HandleCommand("command script add -f " + MODULE_NAME + "." + command_function_name + " " + cmd_name)
+
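+        # For fancy commands, hand back a wrapper so that direct python callers
+        # (not just the lldb command line) also receive a CommandOutput 'O' object.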
+ if fancy:
+ def wrapped_fun(cmd_args=None, cmd_options={}, O=None):
+ if O is None:
+ stream = CommandOutput(cmd_name, fhandle=sys.stdout)
+ with RedirectStdStreams(stdout=stream):
+ return obj(cmd_args, cmd_options, stream)
+ else:
+ return obj(cmd_args, cmd_options, O)
+ return wrapped_fun
return obj
return _cmd
def lldb_alias(alias_name, cmd_line):
- """ define an alias in the lldb command line.
+ """ define an alias in the lldb command line.
        A programmatic way of registering an alias. This basically does
(lldb)command alias alias_name "cmd_line"
- ex.
+ ex.
lldb_alias('readphys16', 'readphys 16')
"""
alias_name = alias_name.strip()
return
def LoadXNUPlugin(name):
- """ Try to load a plugin from the plugins directory.
+ """ Try to load a plugin from the plugins directory.
"""
retval = None
name=name.strip()
print "Plugin is not correctly implemented. Please read documentation on implementing plugins"
except:
print "plugin not found :"+name
-
+
return retval
def ProcessXNUPluginResult(result_data):
ret_status = result_data[0]
ret_string = result_data[1]
ret_commands = result_data[2]
-
+
if ret_status == False:
print "Plugin failed: " + ret_string
return
#DONOTTOUCHME: Exclusive use of xnudebug_test only
lldb_command_tests = {}
def xnudebug_test(test_name):
- """ A function decoratore to register a test with the framework. Each test is supposed to be of format
+    """ A function decorator to register a test with the framework. Each test is supposed to be of the format
def Test<name>(kernel_target, config, lldb_obj, isConnected )
-
+
NOTE: The testname should start with "Test" else exception will be raised.
"""
def _test(obj):
global lldb_command_tests
if obj.__name__.find("Test") != 0 :
- print "Test name ", obj.__name__ , " should start with Test"
+ print "Test name ", obj.__name__ , " should start with Test"
raise ValueError
lldb_command_tests[test_name] = (test_name, obj.__name__, obj, obj.__doc__)
return obj
# End Debugging specific utility functions
-# Kernel Debugging specific classes and accessor methods
+# Kernel Debugging specific classes and accessor methods
# global access object for target kernel
def GetObjectAtIndexFromArray(array_base, index):
""" Subscript indexing for arrays that are represented in C as pointers.
for ex. int *arr = malloc(20*sizeof(int));
- now to get 3rd int from 'arr' you'd do
+ now to get 3rd int from 'arr' you'd do
arr[2] in C
GetObjectAtIndexFromArray(arr_val,2)
params:
def GetLLDBThreadForKernelThread(thread_obj):
""" Get a reference to lldb.SBThread representation for kernel thread.
params:
- thread_obj : core.cvalue - thread object of type thread_t
- returns
+ thread_obj : core.cvalue - thread object of type thread_t
+ returns
lldb.SBThread - lldb thread object for getting backtrace/registers etc.
"""
tid = unsigned(thread_obj.thread_id)
return sbthread
+def GetKextSymbolInfo(load_addr):
+    """ Get a string describing load_addr as <kextname> + offset
+ params:
+ load_addr - int address value of pc in backtrace.
+ returns: str - kext name + offset string. If no cached data available, warning message is returned.
+ """
+ symbol_name = "None"
+ symbol_offset = load_addr
+ kmod_val = kern.globals.kmod
+ if not kern.arch.startswith('arm64'):
+ for kval in IterateLinkedList(kmod_val, 'next'):
+ if load_addr >= unsigned(kval.address) and \
+ load_addr <= (unsigned(kval.address) + unsigned(kval.size)):
+ symbol_name = kval.name
+ symbol_offset = load_addr - unsigned(kval.address)
+ break
+ return "{:#018x} {:s} + {:#x} \n".format(load_addr, symbol_name, symbol_offset)
+
+ # only for arm64 we do lookup for split kexts.
+ cached_kext_info = caching.GetDynamicCacheData("kern.kexts.loadinformation", [])
+ if not cached_kext_info and str(GetConnectionProtocol()) == "core":
+ cached_kext_info = GetKextLoadInformation()
+
+ if not cached_kext_info:
+ return "{:#018x} ~ kext info not available. please run 'showallkexts' once ~ \n".format(load_addr)
+
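+    # Each cached entry is a tuple from GetKextLoadInformation(); as used below,
+    # index 2 holds the kext name and index 5 its text segment (vmaddr/vmsize).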
+ for kval in cached_kext_info:
+ text_seg = kval[5]
+ if load_addr >= text_seg.vmaddr and \
+ load_addr <= (text_seg.vmaddr + text_seg.vmsize):
+ symbol_name = kval[2]
+ symbol_offset = load_addr - text_seg.vmaddr
+ break
+ return "{:#018x} {:s} + {:#x} \n".format(load_addr, symbol_name, symbol_offset)
+
def GetThreadBackTrace(thread_obj, verbosity = vHUMAN, prefix = ""):
""" Get a string to display back trace for a thread.
params:
if not function:
# No debug info for 'function'.
- out_string += prefix
+ out_string += prefix
if not is_continuation:
- out_string += "{fp:#018x} ".format(fp = frame_p)
-
+ out_string += "{fp:#018x} ".format(fp = frame_p)
+
symbol = frame.GetSymbol()
if not symbol:
- symbol_name = "None"
- symbol_offset = load_addr
- kmod_val = kern.globals.kmod
- for kval in IterateLinkedList(kmod_val, 'next'):
- if load_addr >= unsigned(kval.address) and \
- load_addr <= (unsigned(kval.address) + unsigned(kval.size)):
- symbol_name = kval.name
- symbol_offset = load_addr - unsigned(kval.address)
- break
- out_string += "{:#018x} {:s} + {:#x} \n".format(load_addr, symbol_name, symbol_offset)
+ out_string += GetKextSymbolInfo(load_addr)
else:
file_addr = addr.GetFileAddress()
start_addr = symbol.GetStartAddress().GetFileAddress()
symbol_name = symbol.GetName()
symbol_offset = file_addr - start_addr
- out_string += "{addr:#018x} {mod}`{symbol} + {offset:#x} \n".format(addr=load_addr,
+ out_string += "{addr:#018x} {mod}`{symbol} + {offset:#x} \n".format(addr=load_addr,
mod=mod_name, symbol=symbol_name, offset=symbol_offset)
else:
# Debug info is available for 'function'.
func_name = '%s [inlined]' % func_name if frame.IsInlined() else func_name
if is_continuation and frame.IsInlined():
debuglog("Skipping frame for thread {:#018x} since its inlined".format(thread_obj))
- continue
- out_string += prefix
+ continue
+ out_string += prefix
if not is_continuation:
out_string += "{fp:#018x} ".format(fp=frame_p)
out_string += "{addr:#018x} {func}{args} \n".format(addr=load_addr,
func=func_name,
file=file_name, line=line_num,
args="(" + (str(frame.arguments).replace("\n", ", ") if len(frame.arguments) > 0 else "void") + ")")
- iteration += 1
+ iteration += 1
if frame_p:
last_frame_p = frame_p
return out_string
def GetSourceInformationForAddress(addr):
- """ convert and address to function +offset information.
+    """ convert an address to function + offset information.
params: addr - int address in the binary to be symbolicated
- returns: string of format "0xaddress: function + offset"
+ returns: string of format "0xaddress: function + offset"
"""
symbols = kern.SymbolicateFromAddress(addr)
format_string = "{0:#018x} <{1:s} + {2:#0x}>"
""" Find a local variable by name
params:
variable_name: str - name of variable to search for
- returns:
+ returns:
core.value - if the variable is found.
None - if not found or not Valid
"""
print " {0: <20s} - {1}".format(cmd , lldb_command_documentation[cmd][1].split("\n")[0].strip())
else:
print " {0: <20s} - {1}".format(cmd , "No help string found.")
- print """
- Each of the functions listed here accept the following common options.
- -h Show the help string for the command.
- -o <path/to/filename> The output of this command execution will be saved to file. Parser information or errors will
- not be sent to file though. eg /tmp/output.txt
- -s <filter_string> The "filter_string" param is parsed to python regex expression and each line of output
- will be printed/saved only if it matches the expression.
- -v [-v...] Each additional -v will increase the verbosity of the command.
- -p <plugin_name> Send the output of the command to plugin. Please see README for usage of plugins.
-
- Additionally, each command implementation may have more options. "(lldb) help <command> " will show these options.
- """
+    print 'Each of the functions listed here accepts the following common options.'
+ print COMMON_HELP_STRING
+ print 'Additionally, each command implementation may have more options. "(lldb) help <command> " will show these options.'
return None
-@lldb_command('showraw')
+@lldb_command('showraw')
def ShowRawCommand(cmd_args=None):
- """ A command to disable the kernel summaries and show data as seen by the system.
+ """ A command to disable the kernel summaries and show data as seen by the system.
This is useful when trying to read every field of a struct as compared to brief summary
"""
command = " ".join(cmd_args)
lldb.debugger.HandleCommand('type category disable kernel' )
lldb.debugger.HandleCommand( command )
lldb.debugger.HandleCommand('type category enable kernel' )
-
+
@lldb_command('xnudebug')
def XnuDebugCommand(cmd_args=None):
reload:
Reload a submodule from the xnu/tools/lldb directory. Do not include the ".py" suffix in modulename.
usage: xnudebug reload <modulename> (eg. memory, process, stats etc)
+ flushcache:
+            Remove any cached data held in the static or dynamic data cache.
+ usage: xnudebug flushcache
test:
Start running registered test with <name> from various modules.
usage: xnudebug test <name> (eg. test_memstats)
command_args = cmd_args
if len(command_args) == 0:
raise ArgumentError("No command specified.")
- supported_subcommands = ['debug', 'reload', 'test', 'testall']
+ supported_subcommands = ['debug', 'reload', 'test', 'testall', 'flushcache']
subcommand = GetLongestMatchOption(command_args[0], supported_subcommands, True)
if len(subcommand) == 0:
raise ArgumentError("Subcommand (%s) is not a valid command. " % str(command_args[0]))
-
+
subcommand = subcommand[0].lower()
if subcommand == 'debug':
if command_args[-1].lower().find('dis') >=0 and config['debug']:
config['debug'] = True
EnableLLDBAPILogging() # provided by utils.py
print "Enabled debug logging. \nPlease run 'xnudebug debug disable' to disable it again. "
-
+ if subcommand == 'flushcache':
+ print "Current size of cache: {}".format(caching.GetSizeOfCache())
+ caching.ClearAllCache()
+
if subcommand == 'reload':
module_name = command_args[-1]
if module_name in sys.modules:
if test_name in lldb_command_tests:
test = lldb_command_tests[test_name]
print "Running test {:s}".format(test[0])
- if test[2](kern, config, lldb, True) :
+ if test[2](kern, config, lldb, True) :
print "[PASSED] {:s}".format(test[0])
else:
print "[FAILED] {:s}".format(test[0])
- return ""
+ return ""
else:
print "No such test registered with name: {:s}".format(test_name)
print "XNUDEBUG Available tests are:"
for i in lldb_command_tests.keys():
print i
return None
-
+
return False
@lldb_command('showversion')
"""
print kern.version
-@lldb_command('paniclog', 'S')
+def ProcessPanicStackshot(panic_stackshot_addr, panic_stackshot_len):
+ """ Process the panic stackshot from the panic header, saving it to a file if it is valid
+ params: panic_stackshot_addr : start address of the panic stackshot binary data
+ panic_stackshot_len : length of the stackshot binary data
+ returns: nothing
+ """
+ if not panic_stackshot_addr:
+ print "No panic stackshot available (invalid addr)"
+ return
+
+ if not panic_stackshot_len:
+ print "No panic stackshot available (zero length)"
+        return
+ ts = int(time.time())
+ ss_binfile = "/tmp/panic_%d.bin" % ts
+ ss_ipsfile = "/tmp/stacks_%d.ips" % ts
+
+ if not SaveDataToFile(panic_stackshot_addr, panic_stackshot_len, ss_binfile, None):
+ print "Failed to save stackshot binary data to file"
+ return
+
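+    # Convert the raw kcdata stackshot into an .ips file using kcdata.py, which
+    # lives in the same directory as this script.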
+ self_path = str(__file__)
+ base_dir_name = self_path[:self_path.rfind("/")]
+ print "python %s/kcdata.py %s -s %s" % (base_dir_name, ss_binfile, ss_ipsfile)
+ (c,so,se) = RunShellCommand("python %s/kcdata.py %s -s %s" % (base_dir_name, ss_binfile, ss_ipsfile))
+ if c == 0:
+ print "Saved ips stackshot file as %s" % ss_ipsfile
+ return
+ else:
+ print "Failed to run command: exit code: %d, SO: %s SE: %s" % (c, so, se)
+ return
+
+def ParseEmbeddedPanicLog(panic_header, cmd_options={}):
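+    """ Parse and print the panic log from an embedded (eph_*) panic header.
+        params: panic_header - core.value of the embedded panic header
+                cmd_options - dict of options passed through from the paniclog command
+        returns: nothing
+    """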
+ panic_buf = Cast(panic_header, 'char *')
+ panic_log_magic = unsigned(panic_header.eph_magic)
+ panic_log_begin_offset = unsigned(panic_header.eph_panic_log_offset)
+ panic_log_len = unsigned(panic_header.eph_panic_log_len)
+ other_log_begin_offset = unsigned(panic_header.eph_other_log_offset)
+ other_log_len = unsigned(panic_header.eph_other_log_len)
+ expected_panic_magic = xnudefines.EMBEDDED_PANIC_MAGIC
+ panic_stackshot_addr = unsigned(panic_header) + unsigned(panic_header.eph_stackshot_offset)
+ panic_stackshot_len = unsigned(panic_header.eph_stackshot_len)
+ panic_header_flags = unsigned(panic_header.eph_panic_flags)
+
+ warn_str = ""
+ out_str = ""
+
+ if panic_log_magic != 0 and panic_log_magic != expected_panic_magic:
+ warn_str += "BAD MAGIC! Found 0x%x expected 0x%x" % (panic_log_magic,
+ expected_panic_magic)
+
+ if warn_str:
+ print "\n %s" % warn_str
+ if panic_log_begin_offset == 0:
+ return
+
+ if "-S" in cmd_options:
+ if panic_header_flags & xnudefines.EMBEDDED_PANIC_STACKSHOT_SUCCEEDED_FLAG:
+ ProcessPanicStackshot(panic_stackshot_addr, panic_stackshot_len)
+ else:
+ print "No panic stackshot available"
+
+ panic_log_curindex = 0
+ while panic_log_curindex < panic_log_len:
+ p_char = str(panic_buf[(panic_log_begin_offset + panic_log_curindex)])
+ out_str += p_char
+ panic_log_curindex += 1
+
+ if other_log_begin_offset != 0:
+ other_log_curindex = 0
+ while other_log_curindex < other_log_len:
+ p_char = str(panic_buf[(other_log_begin_offset + other_log_curindex)])
+ out_str += p_char
+ other_log_curindex += 1
+
+ print out_str
+ return
+
+def ParseMacOSPanicLog(panic_header, cmd_options={}):
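+    """ Parse and print the panic log from a macOS (mph_*) panic header.
+        params: panic_header - core.value of the macOS panic header
+                cmd_options - dict of options passed through from the paniclog command
+        returns: nothing
+    """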
+ panic_buf = Cast(panic_header, 'char *')
+ panic_log_magic = unsigned(panic_header.mph_magic)
+ panic_log_begin_offset = unsigned(panic_header.mph_panic_log_offset)
+ panic_log_len = unsigned(panic_header.mph_panic_log_len)
+ other_log_begin_offset = unsigned(panic_header.mph_other_log_offset)
+ other_log_len = unsigned(panic_header.mph_other_log_len)
+ cur_debug_buf_ptr_offset = (unsigned(kern.globals.debug_buf_ptr) - unsigned(panic_header))
+ if other_log_begin_offset != 0 and (other_log_len == 0 or other_log_len < (cur_debug_buf_ptr_offset - other_log_begin_offset)):
+ other_log_len = cur_debug_buf_ptr_offset - other_log_begin_offset
+ expected_panic_magic = xnudefines.MACOS_PANIC_MAGIC
+
+ # use the global if it's available (on an x86 corefile), otherwise refer to the header
+ if hasattr(kern.globals, "panic_stackshot_buf"):
+ panic_stackshot_addr = unsigned(kern.globals.panic_stackshot_buf)
+ panic_stackshot_len = unsigned(kern.globals.panic_stackshot_len)
+ else:
+ panic_stackshot_addr = unsigned(panic_header) + unsigned(panic_header.mph_stackshot_offset)
+ panic_stackshot_len = unsigned(panic_header.mph_stackshot_len)
+
+ panic_header_flags = unsigned(panic_header.mph_panic_flags)
+
+ warn_str = ""
+ out_str = ""
+
+ if panic_log_magic != 0 and panic_log_magic != expected_panic_magic:
+ warn_str += "BAD MAGIC! Found 0x%x expected 0x%x" % (panic_log_magic,
+ expected_panic_magic)
+
+ if warn_str:
+ print "\n %s" % warn_str
+ if panic_log_begin_offset == 0:
+ return
+
+ if "-S" in cmd_options:
+ if panic_header_flags & xnudefines.MACOS_PANIC_STACKSHOT_SUCCEEDED_FLAG:
+ ProcessPanicStackshot(panic_stackshot_addr, panic_stackshot_len)
+ else:
+ print "No panic stackshot available"
+
+ panic_log_curindex = 0
+ while panic_log_curindex < panic_log_len:
+ p_char = str(panic_buf[(panic_log_begin_offset + panic_log_curindex)])
+ out_str += p_char
+ panic_log_curindex += 1
+
+ if other_log_begin_offset != 0:
+ other_log_curindex = 0
+ while other_log_curindex < other_log_len:
+ p_char = str(panic_buf[(other_log_begin_offset + other_log_curindex)])
+ out_str += p_char
+ other_log_curindex += 1
+
+ print out_str
+ return
+
+def ParseAURRPanicLog(panic_header, cmd_options={}):
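+    """ Parse and print an AURR (AURR/MacEFI or Crashlog) panic report.
+        Falls back to ParseUnknownPanicLog() if the report version is not recognized.
+        params: panic_header - core.value of the panic region
+                cmd_options - dict of options passed through from the paniclog command
+        returns: nothing
+    """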
+ reset_cause = {
+ 0x0: "OTHER",
+ 0x1: "CATERR",
+ 0x2: "SWD_TIMEOUT",
+ 0x3: "GLOBAL RESET",
+ 0x4: "STRAIGHT TO S5",
+ }
+
+ expected_panic_magic = xnudefines.AURR_PANIC_MAGIC
+
+ panic_buf = Cast(panic_header, 'char *')
+
+ try:
+        # This line will blow up if there's no type info for this struct (older kernel)
+ # We fall back to manual parsing below
+ aurr_panic_header = Cast(panic_header, 'struct efi_aurr_panic_header *')
+ panic_log_magic = unsigned(aurr_panic_header.efi_aurr_magic)
+ panic_log_version = unsigned(aurr_panic_header.efi_aurr_version)
+ panic_log_reset_cause = unsigned(aurr_panic_header.efi_aurr_reset_cause)
+ panic_log_reset_log_offset = unsigned(aurr_panic_header.efi_aurr_reset_log_offset)
+ panic_log_reset_log_len = unsigned(aurr_panic_header.efi_aurr_reset_log_len)
+ except Exception as e:
+ print "*** Warning: kernel symbol file has no type information for 'struct efi_aurr_panic_header'..."
+ print "*** Warning: trying to manually parse..."
+ aurr_panic_header = Cast(panic_header, "uint32_t *")
+ panic_log_magic = unsigned(aurr_panic_header[0])
+ # panic_log_crc = unsigned(aurr_panic_header[1])
+ panic_log_version = unsigned(aurr_panic_header[2])
+ panic_log_reset_cause = unsigned(aurr_panic_header[3])
+ panic_log_reset_log_offset = unsigned(aurr_panic_header[4])
+ panic_log_reset_log_len = unsigned(aurr_panic_header[5])
+
+ if panic_log_magic != 0 and panic_log_magic != expected_panic_magic:
+ print "BAD MAGIC! Found 0x%x expected 0x%x" % (panic_log_magic,
+ expected_panic_magic)
+ return
+
+ print "AURR Panic Version: %d" % (panic_log_version)
+
+ # When it comes time to extend this in the future, please follow the
+ # construct used below in ShowPanicLog()
+ if panic_log_version in (xnudefines.AURR_PANIC_VERSION, xnudefines.AURR_CRASHLOG_PANIC_VERSION):
+ # AURR Report Version 1 (AURR/MacEFI) or 2 (Crashlog)
+ # see macefifirmware/Vendor/Apple/EfiPkg/AppleDebugSupport/Library/Debugger.h
+ print "Reset Cause: 0x%x (%s)" % (panic_log_reset_cause, reset_cause.get(panic_log_reset_cause, "UNKNOWN"))
+
+ # Adjust panic log string length (cap to maximum supported values)
+ if panic_log_version == xnudefines.AURR_PANIC_VERSION:
+ max_string_len = panic_log_reset_log_len
+ elif panic_log_version == xnudefines.AURR_CRASHLOG_PANIC_VERSION:
+ max_string_len = xnudefines.CRASHLOG_PANIC_STRING_LEN
+
+ panic_str_offset = 0
+ out_str = ""
+
+ while panic_str_offset < max_string_len:
+ p_char = str(panic_buf[panic_log_reset_log_offset + panic_str_offset])
+ out_str += p_char
+ panic_str_offset += 1
+
+ print out_str
+
+ # Save Crashlog Binary Data (if available)
+ if "-S" in cmd_options and panic_log_version == xnudefines.AURR_CRASHLOG_PANIC_VERSION:
+ crashlog_binary_offset = panic_log_reset_log_offset + xnudefines.CRASHLOG_PANIC_STRING_LEN
+ crashlog_binary_size = (panic_log_reset_log_len > xnudefines.CRASHLOG_PANIC_STRING_LEN) and (panic_log_reset_log_len - xnudefines.CRASHLOG_PANIC_STRING_LEN) or 0
+
+ if 0 == crashlog_binary_size:
+ print "No crashlog data found..."
+ return
+
+ # Save to file
+ ts = int(time.time())
+ ss_binfile = "/tmp/crashlog_%d.bin" % ts
+
+ if not SaveDataToFile(panic_buf + crashlog_binary_offset, crashlog_binary_size, ss_binfile, None):
+ print "Failed to save crashlog binary data to file"
+ return
+ else:
+ return ParseUnknownPanicLog(panic_header, cmd_options)
+
+ return
+
+def ParseUnknownPanicLog(panic_header, cmd_options={}):
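+    """ Fallback parser used when the panic header magic is not recognized.
+        Prints the magic value and a hint for dumping the region manually.
+    """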
+ magic_ptr = Cast(panic_header, 'uint32_t *')
+ panic_log_magic = dereference(magic_ptr)
+ print "Unrecognized panic header format. Magic: 0x%x..." % unsigned(panic_log_magic)
+ print "Panic region starts at 0x%08x" % int(panic_header)
+ print "Hint: To dump this panic header in order to try manually parsing it, use this command:"
+ print " (lldb) memory read -fx -s4 -c64 0x%08x" % int(panic_header)
+ print " ^ that will dump the first 256 bytes of the panic region"
+ ## TBD: Hexdump some bits here to allow folks to poke at the region manually?
+ return
+
+
+@lldb_command('paniclog', 'SM')
def ShowPanicLog(cmd_args=None, cmd_options={}):
""" Display the paniclog information
usage: (lldb) paniclog
options:
-v : increase verbosity
-S : parse stackshot data (if panic stackshot available)
+ -M : parse macOS panic area (print panic string (if available), and/or capture crashlog info)
"""
- binary_data_bytes_to_skip = 0
- if hasattr(kern.globals, "kc_panic_data"):
- binary_data_bytes_to_skip = unsigned(kern.globals.kc_panic_data.kcd_addr_end) - unsigned(kern.globals.kc_panic_data.kcd_addr_begin)
- if binary_data_bytes_to_skip > 0:
- binary_data_bytes_to_skip += sizeof("struct kcdata_item")
- else:
- binary_data_bytes_to_skip = 0
- if "-S" in cmd_options:
- if hasattr(kern.globals, "kc_panic_data"):
- kc_data = unsigned(addressof(kern.globals.kc_panic_data))
- ts = int(time.time())
- ss_binfile = "/tmp/panic_%d.bin" % ts
- ss_ipsfile = "/tmp/stacks_%d.ips" % ts
- print "savekcdata 0x%x -O %s" % (kc_data, ss_binfile)
- SaveKCDataToFile(["0x%x" % kc_data], {"-O":ss_binfile})
- self_path = str(__file__)
- base_dir_name = self_path[:self_path.rfind("/")]
- print "python %s/kcdata.py %s -s %s" % (base_dir_name, ss_binfile, ss_ipsfile)
- (c,so,se) = RunShellCommand("python %s/kcdata.py %s -s %s" % (base_dir_name, ss_binfile, ss_ipsfile))
- if c == 0:
- print "Saved ips stackshot file as %s" % ss_ipsfile
- else:
- print "Failed to run command: exit code: %d, SO: %s SE: %s" % (c, so, se)
- else:
- print "kc_panic_data is unavailable for this kernel config."
+ if "-M" in cmd_options:
+ if not hasattr(kern.globals, "mac_panic_header"):
+ print "macOS panic data requested but unavailable on this device"
+ return
+ panic_header = kern.globals.mac_panic_header
+ # DEBUG HACK FOR TESTING
+ #panic_header = kern.GetValueFromAddress(0xfffffff054098000, "uint32_t *")
+ else:
+ panic_header = kern.globals.panic_info
+
+ if hasattr(panic_header, "eph_magic"):
+ panic_log_magic = unsigned(panic_header.eph_magic)
+ elif hasattr(panic_header, "mph_magic"):
+ panic_log_magic = unsigned(panic_header.mph_magic)
+ else:
+ print "*** Warning: unsure of panic header format, trying anyway"
+ magic_ptr = Cast(panic_header, 'uint32_t *')
+ panic_log_magic = int(dereference(magic_ptr))
- panic_buf = kern.globals.debug_buf_addr
- panic_buf_start = unsigned(panic_buf)
- panic_buf_end = unsigned(kern.globals.debug_buf_ptr)
- num_bytes = panic_buf_end - panic_buf_start
- if num_bytes == 0 :
+ if panic_log_magic == 0:
+ # No panic here..
return
- out_str = ""
- warn_str = ""
- num_print_bytes = 0
- in_binary_data_region = False
- pos = 0
- while pos < num_bytes:
- p_char = str(panic_buf[pos])
- out_str += p_char
- if p_char == '\n':
- if not in_binary_data_region:
- num_print_bytes += 1
- print out_str
- if (out_str.find("Data: BEGIN>>") >= 0):
- in_binary_data_region = True
- pos += binary_data_bytes_to_skip - 1
- if (out_str.find("<<END") >= 0):
- in_binary_data_region = False
- out_str = ""
- if num_print_bytes > 4096 and config['verbosity'] == vHUMAN:
- warn_str = "LLDBMacro Warning: The paniclog is too large. Trimming to 4096 bytes."
- warn_str += " If you wish to see entire log please use '-v' argument."
- break
- pos += 1
- if warn_str:
- print warn_str
+ panic_parsers = {
+ int(xnudefines.AURR_PANIC_MAGIC) : ParseAURRPanicLog,
+ int(xnudefines.MACOS_PANIC_MAGIC) : ParseMacOSPanicLog,
+ int(xnudefines.EMBEDDED_PANIC_MAGIC) : ParseEmbeddedPanicLog,
+ }
- return
+ # Find the right parser (fall back to unknown parser above)
+ parser = panic_parsers.get(panic_log_magic, ParseUnknownPanicLog)
+
+ # execute it
+ return parser(panic_header, cmd_options)
@lldb_command('showbootargs')
def ShowBootArgs(cmd_args=None):
@static_var("last_process_uniq_id", 1)
def GetDebuggerStopIDValue():
- """ Create a unique session identifier.
+ """ Create a unique session identifier.
returns:
int - a unique number identified by processid and stopid.
"""
GetDebuggerStopIDValue.last_process_uniq_id +=1
proc_uniq_id = GetDebuggerStopIDValue.last_process_uniq_id + 1
- stop_id_str = "{:d}:{:d}".format(proc_uniq_id, stop_id)
+ stop_id_str = "{:d}:{:d}".format(proc_uniq_id, stop_id)
return hash(stop_id_str)
# The initialization code to add your commands
debugger.HandleCommand('type summary add --regex --summary-string "${var%s}" -C yes -p -v "char \[[0-9]*\]"')
debugger.HandleCommand('type format add --format hex -C yes uintptr_t')
kern = KernelTarget(debugger)
+ if not hasattr(lldb.SBValue, 'GetValueAsAddress'):
+ warn_str = "WARNING: lldb version is too old. Some commands may break. Please update to latest lldb."
+ if os.isatty(sys.__stdout__.fileno()):
+ warn_str = VT.DarkRed + warn_str + VT.Default
+ print warn_str
print "xnu debug macros loaded successfully. Run showlldbtypesummaries to enable type summaries."
__lldb_init_module(lldb.debugger, None)
@lldb_command('walkqueue_head', 'S')
def WalkQueueHead(cmd_args=[], cmd_options={}):
- """ walk a queue_head_t and list all members in it. Note this is for queue_head_t. refer to osfmk/kern/queue.h
+ """ walk a queue_head_t and list all members in it. Note this is for queue_head_t. refer to osfmk/kern/queue.h
Option: -S - suppress summary output.
Usage: (lldb) walkqueue_head <queue_entry *> <struct type> <fieldname>
ex: (lldb) walkqueue_head 0x7fffff80 "thread *" "task_threads"
-
+
"""
global lldb_summary_definitions
if not cmd_args:
print lldb_summary_definitions[el_type](i)
else:
print "{0: <#020x}".format(i)
-
-@lldb_command('walklist_entry', 'S')
+
+@lldb_command('walklist_entry', 'SE')
def WalkList(cmd_args=[], cmd_options={}):
""" iterate over a list as defined with LIST_ENTRY in bsd/sys/queue.h
params:
element_type - str : Type of the next element
field_name - str : Name of the field in next element's structure
- Option: -S - suppress summary output.
+ Options: -S - suppress summary output.
+ -E - Iterate using SLIST_ENTRYs
+
Usage: (lldb) walklist_entry <obj with list_entry *> <struct type> <fieldname>
ex: (lldb) walklist_entry 0x7fffff80 "struct proc *" "p_sibling"
-
+
"""
global lldb_summary_definitions
if not cmd_args:
el_type = cmd_args[1]
queue_head = kern.GetValueFromAddress(cmd_args[0], el_type)
field_name = cmd_args[2]
-
showsummary = False
if el_type in lldb_summary_definitions:
showsummary = True
if '-S' in cmd_options:
showsummary = False
+ if '-E' in cmd_options:
+ prefix = 's'
+ else:
+ prefix = ''
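+    # LIST_ENTRY links through 'le_next'; with -E we walk SLIST_ENTRYs, whose
+    # link field is 'sle_next' (see bsd/sys/queue.h).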
elt = queue_head
while unsigned(elt) != 0:
i = elt
- elt = elt.__getattr__(field_name).le_next
+ elt = elt.__getattr__(field_name).__getattr__(prefix + 'le_next')
if showsummary:
print lldb_summary_definitions[el_type](i)
else:
print "{0: <#020x}".format(i)
+def trace_parse_Copt(Copt):
+ """Parses the -C option argument and returns a list of CPUs
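+       Accepts single CPUs and dash ranges, e.g. "0,2-3" yields [0, 2, 3].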
+ """
+ cpusOpt = Copt
+ cpuList = cpusOpt.split(",")
+ chosen_cpus = []
+ for cpu_num_string in cpuList:
+ try:
+ if '-' in cpu_num_string:
+ parts = cpu_num_string.split('-')
+ if len(parts) != 2 or not (parts[0].isdigit() and parts[1].isdigit()):
+ raise ArgumentError("Invalid cpu specification: %s" % cpu_num_string)
+ firstRange = int(parts[0])
+ lastRange = int(parts[1])
+ if firstRange >= kern.globals.real_ncpus or lastRange >= kern.globals.real_ncpus:
+ raise ValueError()
+ if lastRange < firstRange:
+ raise ArgumentError("Invalid CPU range specified: `%s'" % cpu_num_string)
+ for cpu_num in range(firstRange, lastRange + 1):
+ if cpu_num not in chosen_cpus:
+ chosen_cpus.append(cpu_num)
+ else:
+ chosen_cpu = int(cpu_num_string)
+ if chosen_cpu < 0 or chosen_cpu >= kern.globals.real_ncpus:
+ raise ValueError()
+ if chosen_cpu not in chosen_cpus:
+ chosen_cpus.append(chosen_cpu)
+ except ValueError:
+ raise ArgumentError("Invalid CPU number specified. Valid range is 0..%d" % (kern.globals.real_ncpus - 1))
+
+ return chosen_cpus
+
+
+IDX_CPU = 0
+IDX_RINGPOS = 1
+IDX_RINGENTRY = 2
+def Trace_cmd(cmd_args=[], cmd_options={}, headerString=lambda:"", entryString=lambda x:"", ring=[], entries_per_cpu=0, max_backtraces=0):
+ """Generic trace dumper helper function
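+       params: headerString - callable returning the table header line
+               entryString - callable formatting one (cpu, ring index, entry) tuple
+               ring - per-cpu array of trace ring buffers to walk
+               entries_per_cpu - number of entries in each per-cpu ring
+               max_backtraces - maximum backtrace depth printed when -B is given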
+ """
+
+ if '-S' in cmd_options:
+ field_arg = cmd_options['-S']
+ try:
+ getattr(ring[0][0], field_arg)
+ sort_key_field_name = field_arg
+ except AttributeError:
+ raise ArgumentError("Invalid sort key field name `%s'" % field_arg)
+ else:
+ sort_key_field_name = 'start_time_abs'
+
+ if '-C' in cmd_options:
+ chosen_cpus = trace_parse_Copt(cmd_options['-C'])
+ else:
+ chosen_cpus = [x for x in range(kern.globals.real_ncpus)]
+
+ try:
+ limit_output_count = int(cmd_options['-N'])
+ except ValueError:
+        raise ArgumentError("Invalid output count `%s'" % cmd_options['-N'])
+ except KeyError:
+ limit_output_count = None
+
+ reverse_sort = '-R' in cmd_options
+ backtraces = '-B' in cmd_options
+
+    # entries will be a list of 3-tuples, each holding the CPU on which the trace entry was collected,
+    # the original ring index, and the entry itself.
+ entries = []
+ for x in chosen_cpus:
+ ring_slice = [(x, y, ring[x][y]) for y in range(entries_per_cpu)]
+ entries.extend(ring_slice)
+
+ total_entries = len(entries)
+
+ entries.sort(key=lambda x: getattr(x[IDX_RINGENTRY], sort_key_field_name), reverse=reverse_sort)
+
+ if limit_output_count is not None and limit_output_count > total_entries:
+        print ("NOTE: Output count `%d' is too large; showing all %d entries" % (limit_output_count, total_entries))
+ limit_output_count = total_entries
+
+ if len(chosen_cpus) < kern.globals.real_ncpus:
+ print "NOTE: Limiting to entries from cpu%s %s" % ("s" if len(chosen_cpus) > 1 else "", str(chosen_cpus))
+
+ if limit_output_count is not None and limit_output_count < total_entries:
+ entries_to_display = limit_output_count
+ print "NOTE: Limiting to the %s" % ("first entry" if entries_to_display == 1 else ("first %d entries" % entries_to_display))
+ else:
+ entries_to_display = total_entries
+
+ print headerString()
+
+ for x in xrange(entries_to_display):
+ print entryString(entries[x])
+
+ if backtraces:
+ for btidx in range(max_backtraces):
+ nextbt = entries[x][IDX_RINGENTRY].backtrace[btidx]
+ if nextbt == 0:
+ break
+ print "\t" + GetSourceInformationForAddress(nextbt)
+
+
+@lldb_command('iotrace', 'C:N:S:RB')
+def IOTrace_cmd(cmd_args=[], cmd_options={}):
+ """ Prints the iotrace ring buffers for all CPUs by default.
+ Arguments:
+ -B : Print backtraces for each ring entry
+ -C <cpuSpec#>[,...,<cpuSpec#N>] : Limit trace entries to those generated by the specified CPUs (each cpuSpec can be a
+ single CPU number or a range separated by a dash (e.g. "0-3"))
+ -N <count> : Limit output to the first <count> entries (across all chosen CPUs)
+ -R : Display results in reverse-sorted order (oldest first; default is newest-first)
+ -S <sort_key_field_name> : Sort output by specified iotrace_entry_t field name (instead of by timestamp)
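+        ex. (illustrative invocation): (lldb) iotrace -C 0-3 -N 20 -B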
+ """
+ MAX_IOTRACE_BACKTRACES = 16
+
+ if kern.arch != "x86_64":
+ print "Sorry, iotrace is an x86-only command."
+ return
+
+ hdrString = lambda : "%-19s %-8s %-10s %-20s SZ %-18s %-17s DATA" % (
+ "START TIME",
+ "DURATION",
+ "CPU#[RIDX]",
+ " TYPE",
+ " VIRT ADDR",
+ " PHYS ADDR")
+
+ entryString = lambda x : "%-20u(%6u) %6s[%02d] %-20s %-2d 0x%016x 0x%016x 0x%x" % (
+ x[IDX_RINGENTRY].start_time_abs,
+ x[IDX_RINGENTRY].duration,
+ "CPU%d" % x[IDX_CPU],
+ x[IDX_RINGPOS],
+ str(x[IDX_RINGENTRY].iotype).split("=")[1].strip(),
+ x[IDX_RINGENTRY].size,
+ x[IDX_RINGENTRY].vaddr,
+ x[IDX_RINGENTRY].paddr,
+ x[IDX_RINGENTRY].val)
+
+ Trace_cmd(cmd_args, cmd_options, hdrString, entryString, kern.globals.iotrace_ring, kern.globals.iotrace_entries_per_cpu, MAX_IOTRACE_BACKTRACES)
+
+
+@lldb_command('ttrace', 'C:N:S:RB')
+def TrapTrace_cmd(cmd_args=[], cmd_options={}):
+    """ Prints the traptrace ring buffers for all CPUs by default.
+ Arguments:
+ -B : Print backtraces for each ring entry
+ -C <cpuSpec#>[,...,<cpuSpec#N>] : Limit trace entries to those generated by the specified CPUs (each cpuSpec can be a
+ single CPU number or a range separated by a dash (e.g. "0-3"))
+ -N <count> : Limit output to the first <count> entries (across all chosen CPUs)
+ -R : Display results in reverse-sorted order (oldest first; default is newest-first)
+ -S <sort_key_field_name> : Sort output by specified traptrace_entry_t field name (instead of by timestamp)
+ """
+ MAX_TRAPTRACE_BACKTRACES = 8
+
+ if kern.arch != "x86_64":
+ print "Sorry, ttrace is an x86-only command."
+ return
+
+ hdrString = lambda : "%-30s CPU#[RIDX] VECT INTERRUPTED_THREAD PREMLV INTRLV INTERRUPTED_PC" % (
+ "START TIME (DURATION [ns])")
+ entryString = lambda x : "%-20u(%6s) %8s[%02d] 0x%02x 0x%016x %6d %6d %s" % (
+ x[IDX_RINGENTRY].start_time_abs,
+ str(x[IDX_RINGENTRY].duration) if hex(x[IDX_RINGENTRY].duration) != "0xffffffffffffffff" else 'inprog',
+ "CPU%d" % x[IDX_CPU],
+ x[IDX_RINGPOS],
+ int(x[IDX_RINGENTRY].vector),
+ x[IDX_RINGENTRY].curthread,
+ x[IDX_RINGENTRY].curpl,
+ x[IDX_RINGENTRY].curil,
+ GetSourceInformationForAddress(x[IDX_RINGENTRY].interrupted_pc))
+
+ Trace_cmd(cmd_args, cmd_options, hdrString, entryString, kern.globals.traptrace_ring,
+ kern.globals.traptrace_entries_per_cpu, MAX_TRAPTRACE_BACKTRACES)
+
+
+@lldb_command('showsysctls', 'P:')
+def ShowSysctls(cmd_args=[], cmd_options={}):
+ """ Walks the list of sysctl data structures, printing out each during traversal.
+ Arguments:
+ -P <string> : Limit output to sysctls starting with the specified prefix.
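+            ex. (lldb) showsysctls -P kern.ipc   (illustrative prefix)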
+ """
+ if '-P' in cmd_options:
+ _ShowSysctl_prefix = cmd_options['-P']
+ allowed_prefixes = _ShowSysctl_prefix.split('.')
+ if allowed_prefixes:
+ for x in xrange(1, len(allowed_prefixes)):
+ allowed_prefixes[x] = allowed_prefixes[x - 1] + "." + allowed_prefixes[x]
+ else:
+ _ShowSysctl_prefix = ''
+ allowed_prefixes = []
+ def IterateSysctls(oid, parent_str, i):
+ headp = oid
+ parentstr = "<none>" if parent_str is None else parent_str
+ for pp in IterateListEntry(headp, 'struct sysctl_oid *', 'oid_link', 's'):
+ type = pp.oid_kind & 0xf
+ next_parent = str(pp.oid_name)
+ if parent_str is not None:
+ next_parent = parent_str + "." + next_parent
+ st = (" " * i) + str(pp.GetSBValue().Dereference()).replace("\n", "\n" + (" " * i))
+ if type == 1 and pp.oid_arg1 != 0:
+ # Check allowed_prefixes to see if we can recurse from root to the allowed prefix.
+                # To recurse further, we only need to check that the next parent starts with the user-specified
+                # prefix.
+ if next_parent not in allowed_prefixes and next_parent.startswith(_ShowSysctl_prefix) is False:
+ continue
+ print 'parent = "%s"' % parentstr, st[st.find("{"):]
+ IterateSysctls(Cast(pp.oid_arg1, "struct sysctl_oid_list *"), next_parent, i + 2)
+ elif _ShowSysctl_prefix == '' or next_parent.startswith(_ShowSysctl_prefix):
+ print ('parent = "%s"' % parentstr), st[st.find("{"):]
+ IterateSysctls(kern.globals.sysctl__children, None, 0)
+
from memory import *
from process import *
-from ipc import *
+from ipc import *
from pmap import *
from ioreg import *
from mbufs import *
from net import *
+from skywalk import *
from kdp import *
from userspace import *
from pci import *
from misc import *
from apic import *
from scheduler import *
-from atm import *
from structanalyze import *
from ipcimportancedetail import *
from bank import *
+from turnstile import *
+from kasan import *
from kauth import *
from waitq import *
from usertaskgdbserver import *
+from ktrace import *
+from pgtrace import *
+from xnutriage import *
+from kevent import *
+from workqueue import *
+from ulock import *
+from ntstat import *
+from zonetriage import *
+from sysreg import *