def ReadPhysInt(phys_addr, bitsize = 64, cpuval = None):
    """ Read an integer from physical memory at the given address.
        params:
            phys_addr : int - physical address to read
            bitsize   : int - width of the read in bits (8/16/32/64). defaults to 64 bit
            cpuval    : None (optional)
        returns:
            int - value read from memory. In case of failure 0xBAD10AD is returned.
    """
    if "kdp" == GetConnectionProtocol():
        return KDPReadPhysMEM(phys_addr, bitsize)

    #NO KDP. Attempt to use physical memory
    paddr_in_kva = kern.PhysToKernelVirt(long(phys_addr))
    if paddr_in_kva:
        if bitsize == 64:
            return kern.GetValueFromAddress(paddr_in_kva, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
        if bitsize == 32:
            return kern.GetValueFromAddress(paddr_in_kva, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
        if bitsize == 16:
            return kern.GetValueFromAddress(paddr_in_kva, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
        if bitsize == 8:
            return kern.GetValueFromAddress(paddr_in_kva, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()
    return 0xBAD10AD
@lldb_command('readphys')
def ReadPhys(cmd_args = None):
    """ Reads the specified untranslated address.
        The second argument is interpreted as a physical address, and the word of
        the requested width at that address is displayed.
        usage: readphys <nbits> <address>
        address: 1234 or 0x1234
    """
    if cmd_args == None or len(cmd_args) < 2:
        print "Insufficient arguments.", ReadPhys.__doc__
    else:
        nbits = ArgumentStringToInt(cmd_args[0])
        phys_addr = ArgumentStringToInt(cmd_args[1])
        print "{0: <#x}".format(ReadPhysInt(phys_addr, nbits))
49 lldb_alias('readphys8', 'readphys 8 ')
50 lldb_alias('readphys16', 'readphys 16 ')
51 lldb_alias('readphys32', 'readphys 32 ')
52 lldb_alias('readphys64', 'readphys 64 ')
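# Illustrative usage from the lldb prompt once these macros are loaded
# (the address is a placeholder):
#   (lldb) readphys 64 0x1000
#   (lldb) readphys32 0x1000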
def KDPReadPhysMEM(address, bits):
    """ Set up the state for a READPHYSMEM64 command and read data via kdp.
        params:
            address : int - address where to read the data from
            bits : int - number of bits in the value (8/16/32/64)
        returns:
            int: value read from memory.
            0xBAD10AD: if it failed to read data.
    """
    retval = 0xBAD10AD
    if "kdp" != GetConnectionProtocol():
        print "Target is not connected over kdp. Nothing to do here."
        return retval

    input_address = unsigned(addressof(kern.globals.manual_pkt.input))
    len_address = unsigned(addressof(kern.globals.manual_pkt.len))
    data_address = unsigned(addressof(kern.globals.manual_pkt.data))
    if not WriteInt32ToMemoryAddress(0, input_address):
        return retval

    kdp_pkt_size = GetType('kdp_readphysmem64_req_t').GetByteSize()
    if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
        return retval

    data_addr = int(addressof(kern.globals.manual_pkt))
    pkt = kern.GetValueFromAddress(data_addr, 'kdp_readphysmem64_req_t *')

    header_value = GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_READPHYSMEM64'), length=kdp_pkt_size)

    if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
         WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
         WriteInt32ToMemoryAddress((bits/8), int(addressof(pkt.nbytes))) and
         WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
         ):
        if WriteInt32ToMemoryAddress(1, input_address):
            # now read data from the kdp packet
            data_address = unsigned(addressof(kern.GetValueFromAddress(int(addressof(kern.globals.manual_pkt.data)), 'kdp_readphysmem64_reply_t *').data))
            if bits == 64:
                retval = kern.GetValueFromAddress(data_address, 'uint64_t *').GetSBValue().Dereference().GetValueAsUnsigned()
            if bits == 32:
                retval = kern.GetValueFromAddress(data_address, 'uint32_t *').GetSBValue().Dereference().GetValueAsUnsigned()
            if bits == 16:
                retval = kern.GetValueFromAddress(data_address, 'uint16_t *').GetSBValue().Dereference().GetValueAsUnsigned()
            if bits == 8:
                retval = kern.GetValueFromAddress(data_address, 'uint8_t *').GetSBValue().Dereference().GetValueAsUnsigned()
    return retval
def KDPWritePhysMEM(address, intval, bits):
    """ Set up the state for a WRITEPHYSMEM64 command and store data via kdp.
        params:
            address : int - address where to save the data
            intval : int - integer value to be stored in memory
            bits : int - number of bits in intval (8/16/32/64)
        returns:
            boolean: True if the write succeeded.
    """
    if "kdp" != GetConnectionProtocol():
        print "Target is not connected over kdp. Nothing to do here."
        return False

    input_address = unsigned(addressof(kern.globals.manual_pkt.input))
    len_address = unsigned(addressof(kern.globals.manual_pkt.len))
    data_address = unsigned(addressof(kern.globals.manual_pkt.data))
    if not WriteInt32ToMemoryAddress(0, input_address):
        return False

    kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits / 8)
    if not WriteInt32ToMemoryAddress(kdp_pkt_size, len_address):
        return False

    data_addr = int(addressof(kern.globals.manual_pkt))
    pkt = kern.GetValueFromAddress(data_addr, 'kdp_writephysmem64_req_t *')

    header_value = GetKDPPacketHeaderInt(request=GetEnumValue('kdp_req_t::KDP_WRITEPHYSMEM64'), length=kdp_pkt_size)

    if ( WriteInt64ToMemoryAddress((header_value), int(addressof(pkt.hdr))) and
         WriteInt64ToMemoryAddress(address, int(addressof(pkt.address))) and
         WriteInt32ToMemoryAddress((bits/8), int(addressof(pkt.nbytes))) and
         WriteInt16ToMemoryAddress(xnudefines.lcpu_self, int(addressof(pkt.lcpu)))
         ):
        if bits == 8:
            if not WriteInt8ToMemoryAddress(intval, int(addressof(pkt.data))):
                return False
        if bits == 16:
            if not WriteInt16ToMemoryAddress(intval, int(addressof(pkt.data))):
                return False
        if bits == 32:
            if not WriteInt32ToMemoryAddress(intval, int(addressof(pkt.data))):
                return False
        if bits == 64:
            if not WriteInt64ToMemoryAddress(intval, int(addressof(pkt.data))):
                return False
        if WriteInt32ToMemoryAddress(1, input_address):
            return True
    return False
def WritePhysInt(phys_addr, int_val, bitsize = 64):
    """ Write an integer value into physical memory at the given address.
        params:
            phys_addr : int - physical address to write to
            int_val   : int - integer value to write into memory
            bitsize   : int - width of the write in bits (8/16/32/64). defaults to 64 bit
        returns:
            bool - True if write was successful.
    """
    if "kdp" == GetConnectionProtocol():
        if not KDPWritePhysMEM(phys_addr, int_val, bitsize):
            print "Failed to write via KDP."
            return False
        return True
    #We are not connected via KDP. So do manual math and savings.
    print "Failed: Write to physical memory is not supported for %s connection." % GetConnectionProtocol()
    return False
@lldb_command('writephys')
def WritePhys(cmd_args=None):
    """ Writes to the specified untranslated address.
        The second argument is interpreted as a physical address, and a word of
        the requested width is written to it.
        usage: writephys <nbits> <address> <value>
        address: 1234 or 0x1234
        value: int value to be written
        ex. (lldb) writephys 16 0x12345abcd 0x25
    """
    if cmd_args == None or len(cmd_args) < 3:
        print "Invalid arguments.", WritePhys.__doc__
    else:
        nbits = ArgumentStringToInt(cmd_args[0])
        phys_addr = ArgumentStringToInt(cmd_args[1])
        int_value = ArgumentStringToInt(cmd_args[2])
        print WritePhysInt(phys_addr, int_value, nbits)
lldb_alias('writephys8', 'writephys 8 ')
lldb_alias('writephys16', 'writephys 16 ')
lldb_alias('writephys32', 'writephys 32 ')
lldb_alias('writephys64', 'writephys 64 ')
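# Illustrative usage (address and value are placeholders):
#   (lldb) writephys64 0x1000 0xdeadbeef
#   (lldb) writephys 8 0x1000 0x25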
def _PT_Step(paddr, index, verbose_level = vSCRIPT):
    """ Step to the lower-level page table and print attributes.
        paddr: current page table entry physical address
        index: current page table entry index (0..511)
        verbose_level: vHUMAN: print nothing
                       vSCRIPT: print basic information
                       vDETAIL: print basic information and hex table dump
        returns: (pt_paddr, pt_valid, pt_large)
            pt_paddr: next level page table entry physical address
            pt_valid: 1 if pt_paddr is valid, 0 if the walk should be aborted
            pt_large: 1 if pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self)
    out_string = ''
    if verbose_level >= vDETAIL:
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    paddr_mask = ~((0xfff << 52) | 0xfff)
    paddr_large_mask = ~((0xfff << 52) | 0x1fffff)
    pt_paddr = 0
    pt_valid = False
    pt_large = False
    if verbose_level < vSCRIPT:
        if entry & 0x1:
            pt_valid = True
            pt_paddr = entry & paddr_mask
            if entry & (0x1 << 7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        out_string += "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x1:
            out_string += " valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += " invalid"
            pt_paddr = 0
            pt_valid = False
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " read-only"
        if entry & (0x1 << 2):
            out_string += " user"
        else:
            out_string += " supervisor"
        if entry & (0x1 << 3):
            out_string += " write-through"
        if entry & (0x1 << 4):
            out_string += " cache-disabled"
        if entry & (0x1 << 5):
            out_string += " accessed"
        if entry & (0x1 << 6):
            out_string += " dirty"
        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
        if entry & (0x1 << 8):
            out_string += " global"
        if entry & (0x3 << 9):
            out_string += " avail:{0:x}".format((entry >> 9) & 0x3)
        if entry & (0x1 << 63):
            out_string += " noexec"
        print out_string
    return (pt_paddr, pt_valid, pt_large)
def _PT_StepEPT(paddr, index, verbose_level = vSCRIPT):
    """ Step to the lower-level page table and print attributes for an EPT pmap.
        paddr: current page table entry physical address
        index: current page table entry index (0..511)
        verbose_level: vHUMAN: print nothing
                       vSCRIPT: print basic information
                       vDETAIL: print basic information and hex table dump
        returns: (pt_paddr, pt_valid, pt_large)
            pt_paddr: next level page table entry physical address
            pt_valid: 1 if pt_paddr is valid, 0 if the walk should be aborted
            pt_large: 1 if pt_paddr is a page frame address
                      of a large page and not another page table entry
    """
    entry_addr = paddr + (8 * index)
    entry = ReadPhysInt(entry_addr, 64, xnudefines.lcpu_self)
    out_string = ''
    if verbose_level >= vDETAIL:
        for pte_loop in range(0, 512):
            paddr_tmp = paddr + (8 * pte_loop)
            out_string += "{0: <#020x}:\t {1: <#020x}\n".format(paddr_tmp, ReadPhysInt(paddr_tmp, 64, xnudefines.lcpu_self))
    paddr_mask = ~((0xfff << 52) | 0xfff)
    paddr_large_mask = ~((0xfff << 52) | 0x1fffff)
    pt_paddr = 0
    pt_valid = False
    pt_large = False
    if verbose_level < vSCRIPT:
        if entry & 0x7:
            pt_valid = True
            pt_paddr = entry & paddr_mask
            if entry & (0x1 << 7):
                pt_large = True
                pt_paddr = entry & paddr_large_mask
    else:
        out_string += "{0: <#020x}:\n\t{1:#020x}\n\t".format(entry_addr, entry)
        if entry & 0x7:
            out_string += "valid"
            pt_paddr = entry & paddr_mask
            pt_valid = True
        else:
            out_string += "invalid"
            pt_paddr = 0
            pt_valid = False
            if entry & (0x1 << 62):
                out_string += " compressed"
            #Stop decoding other bits
            entry = 0
        if entry & 0x1:
            out_string += " readable"
        else:
            out_string += " no read"
        if entry & (0x1 << 1):
            out_string += " writable"
        else:
            out_string += " no write"
        if entry & (0x1 << 2):
            out_string += " executable"
        else:
            out_string += " no exec"
        # EPT memory-type field, bits [5:3]
        if (entry & 0x38) == 0x30:
            out_string += " cache-WB"
        elif (entry & 0x38) == 0x28:
            out_string += " cache-WP"
        elif (entry & 0x38) == 0x20:
            out_string += " cache-WT"
        elif (entry & 0x38) == 0x8:
            out_string += " cache-WC"
        else:
            out_string += " cache-NC"
        if (entry & 0x40) == 0x40:
            out_string += " Ignore-PTA"
        if (entry & 0x100) == 0x100:
            out_string += " accessed"
        if (entry & 0x200) == 0x200:
            out_string += " dirty"
        if entry & (0x1 << 7):
            out_string += " large"
            pt_large = True
        else:
            pt_large = False
        print out_string
    return (pt_paddr, pt_valid, pt_large)
def _PmapL4Walk(pmap_addr_val, vaddr, ept_pmap, verbose_level = vSCRIPT):
    """ Walk the 4-level pmap.
        params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t
                vaddr : int - virtual address to walk
                ept_pmap : int - nonzero if the walk should use the EPT entry format
    """
    is_cpu64_bit = int(kern.globals.cpu_64bit)
    pt_paddr = unsigned(pmap_addr_val)
    pt_valid = (unsigned(pmap_addr_val) != 0)
    pt_large = 0
    pframe_offset = 0
    if pt_valid and is_cpu64_bit:
        # Lookup bits 47:39 of linear address in PML4T
        pt_index = (vaddr >> 39) & 0x1ff
        pframe_offset = vaddr & 0x7fffffffff
        if verbose_level > vHUMAN:
            print "pml4 (index {0:d}):".format(pt_index)
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid:
        # Lookup bits 38:30 of the linear address in PDPT
        pt_index = (vaddr >> 30) & 0x1ff
        pframe_offset = vaddr & 0x3fffffff
        if verbose_level > vHUMAN:
            print "pdpt (index {0:d}):".format(pt_index)
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        #Lookup bits 29:21 of the linear address in PDT
        pt_index = (vaddr >> 21) & 0x1ff
        pframe_offset = vaddr & 0x1fffff
        if verbose_level > vHUMAN:
            print "pdt (index {0:d}):".format(pt_index)
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)
    if pt_valid and not pt_large:
        #Lookup bits 20:12 of the linear address in PT
        pt_index = (vaddr >> 12) & 0x1ff
        pframe_offset = vaddr & 0xfff
        if verbose_level > vHUMAN:
            print "pt (index {0:d}):".format(pt_index)
        if not(ept_pmap):
            (pt_paddr, pt_valid, pt_large) = _PT_Step(pt_paddr, pt_index, verbose_level)
        else:
            (pt_paddr, pt_valid, pt_large) = _PT_StepEPT(pt_paddr, pt_index, verbose_level)

    paddr = 0
    paddr_isvalid = False
    if pt_valid:
        paddr = pt_paddr + pframe_offset
        paddr_isvalid = True

    if verbose_level > vHUMAN:
        if paddr_isvalid:
            pvalue = ReadPhysInt(paddr, 32, xnudefines.lcpu_self)
            print "phys {0: <#020x}: {1: <#020x}".format(paddr, pvalue)
        else:
            print "no translation"

    return paddr
def PmapDecodeTTEARM(tte, level, verbose_level):
    """ Display the bits of an ARM translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level. Valid values are 1 or 2.
        verbose_level: verbosity. vHUMAN, vSCRIPT, vDETAIL
    """
    out_string = ""
    if level == 1 and (tte & 0x3) == 0x2:
        if verbose_level < vSCRIPT:
            return

        #bit [1:0] evaluated in PmapWalkARM
        # B bit 2, C bit 3
        b_bit = (tte & 0x4) >> 2
        c_bit = (tte & 0x8) >> 3
        # XN bit 4
        if tte & 0x10:
            out_string += "no-execute"
        else:
            out_string += "execute"
        #Domain bit [8:5] if not supersection
        if (tte & 0x40000) == 0x0:
            out_string += " domain ({:d})".format(((tte & 0x1e0) >> 5))
        # IMP bit 9
        out_string += " imp({:d})".format(((tte & 0x200) >> 9))
        # AP bit 15 and [11:10] merged to a single 3 bit value
        access = ((tte & 0xc00) >> 10) | ((tte & 0x8000) >> 13)
        out_string += xnudefines.arm_level2_access_strings[access]
        # TEX bits [14:12]
        tex_bits = ((tte & 0x7000) >> 12)
        #Print TEX, C , B all together
        out_string += " TEX:C:B({:d}{:d}{:d}:{:d}:{:d})".format(
            1 if (tex_bits & 0x4) else 0,
            1 if (tex_bits & 0x2) else 0,
            1 if (tex_bits & 0x1) else 0,
            c_bit,
            b_bit)
        # S bit 16
        if tte & 0x10000:
            out_string += " shareable"
        else:
            out_string += " not-shareable"
        # nG bit 17
        if tte & 0x20000:
            out_string += " not-global"
        else:
            out_string += " global"
        # Supersection bit 18
        if tte & 0x40000:
            out_string += " supersection"
        else:
            out_string += " section"
        # NS bit 19
        if tte & 0x80000:
            out_string += " no-secure"
        else:
            out_string += " secure"
    elif level == 1 and (tte & 0x3) == 0x1:
        if verbose_level >= vSCRIPT:
            # bit [1:0] evaluated in PmapWalkARM
            # NS bit 3
            if tte & 0x8:
                out_string += ' no-secure'
            else:
                out_string += ' secure'
            #Domain bit [8:5]
            out_string += " domain({:d})".format(((tte & 0x1e0) >> 5))
            # IMP bit 9
            out_string += " imp({:d})".format(((tte & 0x200) >> 9))
    elif level == 2:
        pte = tte
        if verbose_level >= vSCRIPT:
            if (pte & 0x3) == 0x0:
                out_string += " invalid"
            else:
                if (pte & 0x3) == 0x1:
                    out_string += " large"
                    # XN bit 15
                    if pte & 0x8000 == 0x8000:
                        out_string += " no-execute"
                    else:
                        out_string += " execute"
                else:
                    out_string += " small"
                    # XN bit 0
                    if (pte & 0x1) == 0x01:
                        out_string += " no-execute"
                    else:
                        out_string += " execute"
                # B bit 2, C bit 3
                b_bit = (pte & 0x4) >> 2
                c_bit = (pte & 0x8) >> 3
                # AP bit 9 and [5:4], merged to a single 3-bit value
                access = (pte & 0x30) >> 4 | (pte & 0x200) >> 7
                out_string += xnudefines.arm_level2_access_strings[access]
                #TEX bit [14:12] for large, [8:6] for small
                tex_bits = ((pte & 0x1c0) >> 6)
                if (pte & 0x3) == 0x1:
                    tex_bits = ((pte & 0x7000) >> 12)
                # Print TEX, C , B all together
                out_string += " TEX:C:B({:d}{:d}{:d}:{:d}:{:d})".format(
                    1 if (tex_bits & 0x4) else 0,
                    1 if (tex_bits & 0x2) else 0,
                    1 if (tex_bits & 0x1) else 0,
                    c_bit,
                    b_bit)
                # S bit 10
                if pte & 0x400:
                    out_string += " shareable"
                else:
                    out_string += " not-shareable"
                # nG bit 11
                if pte & 0x800:
                    out_string += " not-global"
                else:
                    out_string += " global"
    print out_string
def _PmapWalkARMLevel1Section(tte, vaddr, verbose_level = vSCRIPT):
    paddr = 0
    #Supersection or just section?
    if (tte & 0x40000) == 0x40000:
        paddr = ((tte & 0xFF000000) | (vaddr & 0x00FFFFFF))
    else:
        paddr = ((tte & 0xFFF00000) | (vaddr & 0x000FFFFF))

    if verbose_level >= vSCRIPT:
        print "{0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(tte), tte),
        #bit [1:0] evaluated in PmapWalkARM
        PmapDecodeTTEARM(tte, 1, verbose_level)

    return paddr
def _PmapWalkARMLevel2(tte, vaddr, verbose_level = vSCRIPT):
    """ Pmap walk the level 2 tte.
        params:
            tte - level 1 coarse page table descriptor
            vaddr - int, virtual address to translate
        returns: int - physical address; prints additional information based on verbose_level
    """
    pte_base = kern.PhysToKernelVirt(tte & 0xFFFFFC00)
    pte_index = (vaddr >> 12) & 0xFF
    pte_base_val = kern.GetValueFromAddress(pte_base, 'pt_entry_t *')
    pte = pte_base_val[pte_index]

    paddr = (unsigned(pte) & 0xFFFFF000) | (vaddr & 0xFFF)

    if verbose_level >= vSCRIPT:
        print "{0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(tte), tte),
        PmapDecodeTTEARM(tte, 1, verbose_level)
    if verbose_level >= vSCRIPT:
        print "second-level table (index {:d}):".format(pte_index)
    if verbose_level >= vDETAIL:
        for i in range(256):
            tmp = pte_base_val[i]
            print "{0: <#020x}:\t{1: <#020x}".format(addressof(tmp), unsigned(tmp))
    if verbose_level >= vSCRIPT:
        print " {0: <#020x}\n\t{1: <#020x}\n\t".format(addressof(pte), unsigned(pte)),
        PmapDecodeTTEARM(pte, 2, verbose_level)

    return paddr
    #end of level 2 walking of arm
def PmapWalkARM(pmap, vaddr, verbose_level = vHUMAN):
    """ Pmap walking for ARM kernel.
        params:
            pmap: core.value - representing pmap_t in kernel
            vaddr: int - integer representing virtual address to walk
    """
    paddr = 0
    # shift by TTESHIFT (20) to get tte index
    # Assume all L1 indexing starts at VA 0...for our purposes it does,
    # as that's where all user pmaps start, and the kernel pmap contains
    # 4 L1 pages (the lower 2 of which are unused after bootstrap)
    tte_index = vaddr >> 20
    tte = pmap.tte[tte_index]
    if verbose_level >= vSCRIPT:
        print "First-level table (index {:d}):".format(tte_index)
    if verbose_level >= vDETAIL:
        for i in range(0, pmap.tte_index_max):
            ptr = unsigned(addressof(pmap.tte[i]))
            val = unsigned(pmap.tte[i])
            print "{0: <#020x}:\t {1: <#020x}".format(ptr, val)
    if (tte & 0x3) == 0x1:
        paddr = _PmapWalkARMLevel2(tte, vaddr, verbose_level)
    elif (tte & 0x3) == 0x2:
        paddr = _PmapWalkARMLevel1Section(tte, vaddr, verbose_level)
    else:
        if verbose_level >= vSCRIPT:
            print "Invalid First-Level Translation Table Entry: {0: #020x}".format(tte)

    if verbose_level >= vHUMAN:
        if paddr:
            print "Translation of {:#x} is {:#x}.".format(vaddr, paddr)
        else:
            print "(no translation)"

    return paddr
def PmapWalkX86_64(pmapval, vaddr, verbose_level = vSCRIPT):
    """ Walk the page tables for a virtual address in an x86_64 (or EPT) pmap.
        params: pmapval - core.value representing pmap_t in kernel
                vaddr: int - int representing virtual address to walk
    """
    if pmapval.pm_cr3 != 0:
        if verbose_level > vHUMAN:
            print "Using normal Intel PMAP from pm_cr3\n"
        return _PmapL4Walk(pmapval.pm_cr3, vaddr, 0, config['verbosity'])
    else:
        if verbose_level > vHUMAN:
            print "Using EPT pmap from pm_eptp\n"
        return _PmapL4Walk(pmapval.pm_eptp, vaddr, 1, config['verbosity'])
def assert_64bit(val):
    assert(val < (1 << 64))

ARM64_TTE_SIZE = 8
ARM64_VMADDR_BITS = 48
def PmapBlockOffsetMaskARM64(level):
    assert level >= 1 and level <= 3
    page_size = kern.globals.arm_hardware_page_size
    ttentries = (page_size / ARM64_TTE_SIZE)
    return page_size * (ttentries ** (3 - level)) - 1
def PmapBlockBaseMaskARM64(level):
    assert level >= 1 and level <= 3
    page_size = kern.globals.arm_hardware_page_size
    return ((1 << ARM64_VMADDR_BITS) - 1) & ~PmapBlockOffsetMaskARM64(level)
def PmapIndexMaskARM64(level):
    assert level >= 1 and level <= 3
    page_size = kern.globals.arm_hardware_page_size
    ttentries = (page_size / ARM64_TTE_SIZE)
    return page_size * (ttentries ** (3 - level) * (ttentries - 1))
def PmapIndexDivideARM64(level):
    assert level >= 1 and level <= 3
    page_size = kern.globals.arm_hardware_page_size
    ttentries = (page_size / ARM64_TTE_SIZE)
    return page_size * (ttentries ** (3 - level))
def PmapTTnIndexARM64(vaddr, level):
    assert(type(vaddr) in (long, int))
    assert_64bit(vaddr)

    return (vaddr & PmapIndexMaskARM64(level)) // PmapIndexDivideARM64(level)
def PmapDecodeTTEARM64(tte, level):
    """ Display the bits of an ARM64 translation table or page table entry
        in human-readable form.
        tte: integer value of the TTE/PTE
        level: translation table level. Valid values are 1, 2, or 3.
    """
    assert(type(tte) == long)
    assert(type(level) == int)

    if tte & 0x1 == 0x1:
        if (tte & 0x2 == 0x2) and (level != 0x3):
            print "Type = Table pointer."
            print "Table addr = {:#x}.".format(tte & 0xfffffffff000)
            print "PXN = {:#x}.".format((tte >> 59) & 0x1)
            print "XN = {:#x}.".format((tte >> 60) & 0x1)
            print "AP = {:#x}.".format((tte >> 61) & 0x3)
            print "NS = {:#x}".format(tte >> 63)
        else:
            print "Type = Block."
            print "AttrIdx = {:#x}.".format((tte >> 2) & 0x7)
            print "NS = {:#x}.".format((tte >> 5) & 0x1)
            print "AP = {:#x}.".format((tte >> 6) & 0x3)
            print "SH = {:#x}.".format((tte >> 8) & 0x3)
            print "AF = {:#x}.".format((tte >> 10) & 0x1)
            print "nG = {:#x}.".format((tte >> 11) & 0x1)
            print "HINT = {:#x}.".format((tte >> 52) & 0x1)
            print "PXN = {:#x}.".format((tte >> 53) & 0x1)
            print "XN = {:#x}.".format((tte >> 54) & 0x1)
            print "SW Use = {:#x}.".format((tte >> 55) & 0xf)
    else:
        print "Invalid."
def PmapWalkARM64(pmap, vaddr, verbose_level = vHUMAN):
    assert(type(pmap) == core.cvalue.value)
    assert(type(vaddr) in (long, int))
    page_size = kern.globals.arm_hardware_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)

    paddr = 0

    tt1_index = PmapTTnIndexARM64(vaddr, 1)
    tt2_index = PmapTTnIndexARM64(vaddr, 2)
    tt3_index = PmapTTnIndexARM64(vaddr, 3)

    # The pmap starts at a page table level that is defined by register
    # values; the kernel exports the root level for LLDB
    level = kern.globals.arm64_root_pgtable_level
    if level == 0:
        root_tt_index = tt0_index
    elif level == 1:
        root_tt_index = tt1_index
    elif level == 2:
        root_tt_index = tt2_index
    elif level == 3:
        root_tt_index = tt3_index

    # If the root of the page table is not a full page, we need to
    # truncate the index
    root_tt_index = root_tt_index % unsigned(kern.globals.arm64_root_pgtable_num_ttes)

    tte = long(unsigned(pmap.tte[root_tt_index]))
    assert(type(tte) == long)

    while True:
        if level == 0:
            # This is unsupported at the moment, as no kernel configurations use L0
            assert(False)
        elif level == 1:
            if verbose_level >= vSCRIPT:
                print "L1 entry: {:#x}".format(tte)
            if verbose_level >= vDETAIL:
                PmapDecodeTTEARM64(tte, 1)

            if tte & 0x1 == 0x1:
                # Check for L1 block entry
                if tte & 0x2 == 0x0:
                    # Handle L1 block entry
                    paddr = tte & PmapBlockBaseMaskARM64(1)
                    paddr = paddr | (vaddr & PmapBlockOffsetMaskARM64(1))
                    print "phys: {:#x}".format(paddr)
                    break
                else:
                    # Handle L1 table entry
                    l2_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt2_index)
                    assert(type(l2_phys) == long)

                    l2_virt = kern.PhysToKernelVirt(l2_phys)
                    assert(type(l2_virt) == long)

                    if verbose_level >= vDETAIL:
                        print "L2 physical address: {:#x}. L2 virtual address: {:#x}".format(l2_phys, l2_virt)

                    ttep = kern.GetValueFromAddress(l2_virt, "tt_entry_t*")
                    tte = long(unsigned(dereference(ttep)))
                    assert(type(tte) == long)
            elif verbose_level >= vHUMAN:
                print "L1 entry invalid: {:#x}\n".format(tte)
        elif level == 2:
            if verbose_level >= vSCRIPT:
                print "L2 entry: {:#0x}".format(tte)
            if verbose_level >= vDETAIL:
                PmapDecodeTTEARM64(tte, 2)

            if tte & 0x1 == 0x1:
                # Check for L2 block entry
                if tte & 0x2 == 0x0:
                    # Handle L2 block entry
                    paddr = tte & PmapBlockBaseMaskARM64(2)
                    paddr = paddr | (vaddr & PmapBlockOffsetMaskARM64(2))
                    break
                else:
                    # Handle L2 table entry
                    l3_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt3_index)
                    assert(type(l3_phys) == long)

                    l3_virt = kern.PhysToKernelVirt(l3_phys)
                    assert(type(l3_virt) == long)

                    if verbose_level >= vDETAIL:
                        print "L3 physical address: {:#x}. L3 virtual address: {:#x}".format(l3_phys, l3_virt)

                    ttep = kern.GetValueFromAddress(l3_virt, "tt_entry_t*")
                    tte = long(unsigned(dereference(ttep)))
                    assert(type(tte) == long)
            elif verbose_level >= vHUMAN: # tte & 0x1 == 0x1
                print "L2 entry invalid: {:#x}\n".format(tte)
        elif level == 3:
            if verbose_level >= vSCRIPT:
                print "L3 entry: {:#0x}".format(tte)
            if verbose_level >= vDETAIL:
                PmapDecodeTTEARM64(tte, 3)

            if tte & 0x3 == 0x3:
                paddr = tte & page_base_mask
                paddr = paddr | (vaddr & page_offset_mask)
            elif verbose_level >= vHUMAN:
                print "L3 entry invalid: {:#x}\n".format(tte)

            # This was the leaf page table page for this request; we're done
            break

        # We've parsed one level, so go to the next level
        level = level + 1

    if verbose_level >= vHUMAN:
        if paddr:
            print "Translation of {:#x} is {:#x}.".format(vaddr, paddr)
        else:
            print "(no translation)"

    return paddr
def PmapWalk(pmap, vaddr, verbose_level = vHUMAN):
    if kern.arch == 'x86_64':
        return PmapWalkX86_64(pmap, vaddr, verbose_level)
    elif kern.arch == 'arm':
        return PmapWalkARM(pmap, vaddr, verbose_level)
    elif kern.arch.startswith('arm64'):
        return PmapWalkARM64(pmap, vaddr, verbose_level)
    else:
        raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch))
@lldb_command('pmap_walk')
def PmapWalkHelper(cmd_args=None):
    """ Perform a page-table walk in <pmap> for <virtual_address>.
        Syntax: (lldb) pmap_walk <pmap> <virtual_address> [-v] [-e]
            Multiple -v's can be specified for increased verbosity
    """
    if cmd_args == None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to pmap_walk.")

    pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t')
    addr = unsigned(kern.GetValueFromAddress(cmd_args[1], 'void *'))
    PmapWalk(pmap, addr, config['verbosity'])
@lldb_command('decode_tte')
def DecodeTTE(cmd_args=None):
    """ Decode the bits in the TTE/PTE value specified <tte_val> for translation level <level>
        Syntax: (lldb) decode_tte <tte_val> <level>
    """
    if cmd_args == None or len(cmd_args) < 2:
        raise ArgumentError("Too few arguments to decode_tte.")
    if kern.arch == 'arm':
        PmapDecodeTTEARM(kern.GetValueFromAddress(cmd_args[0], "unsigned long"), ArgumentStringToInt(cmd_args[1]), vSCRIPT)
    elif kern.arch.startswith('arm64'):
        PmapDecodeTTEARM64(long(kern.GetValueFromAddress(cmd_args[0], "unsigned long")), ArgumentStringToInt(cmd_args[1]))
    else:
        raise NotImplementedError("decode_tte does not support {0}".format(kern.arch))
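# Illustrative invocation (the entry value is a placeholder, not taken from a real target):
#   (lldb) decode_tte 0x60000235d6c0bf 3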
924 """ Walk a physical-to-virtual reverse mapping list maintained by the arm pmap
925 pa: physical address (NOT page number). Does not need to be page-aligned
927 vm_first_phys
= unsigned(kern
.globals.vm_first_phys
)
928 vm_last_phys
= unsigned(kern
.globals.vm_last_phys
)
929 if pa
< vm_first_phys
or pa
>= vm_last_phys
:
930 raise ArgumentError("PA {:#x} is outside range of managed physical addresses: [{:#x}, {:#x})".format(pa
, vm_first_phys
, vm_last_phys
))
931 page_size
= kern
.globals.arm_hardware_page_size
932 pn
= (pa
- unsigned(kern
.globals.vm_first_phys
)) / page_size
933 pvh
= unsigned(kern
.globals.pv_head_table
[pn
])
936 print "PVH type: NULL"
939 print "PVH type: page-table descriptor ({:#x})".format(pvh
& ~
0x3)
943 print "PVH type: single PTE"
944 print "PTE {:#x}: {:#x}".format(ptep
, dereference(kern
.GetValueFromAddress(ptep
, 'pt_entry_t *')))
947 print "PVH type: PTE list"
949 pve
= kern
.GetValueFromAddress(pvep
, "pv_entry_t *")
950 if unsigned(pve
.pve_next
) & 0x1:
951 pve_str
= ' (alt acct) '
955 pvep
= unsigned(pve
.pve_next
) & ~
0x1
956 ptep
= unsigned(pve
.pve_ptep
) & ~
0x3
957 print "PVE {:#x}, PTE {:#x}{:s}: {:#x}".format(current_pvep
, ptep
, pve_str
, dereference(kern
.GetValueFromAddress(ptep
, 'pt_entry_t *')))
@lldb_command('pv_walk')
def PVWalk(cmd_args=None):
    """ Show mappings for <physical_address> tracked in the PV list.
        Syntax: (lldb) pv_walk <physical_address>
    """
    if cmd_args == None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pv_walk.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pv_walk does not support {0}".format(kern.arch))
    PVWalkARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))
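# Illustrative invocation (placeholder physical address):
#   (lldb) pv_walk 0x80000000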
971 """ Display vital information about an ARM page table entry
972 pte: kernel virtual address of the PTE. Should be L3 PTE. May also work with L2 TTEs for certain devices.
974 page_size
= kern
.globals.arm_hardware_page_size
975 pn
= (pte
- unsigned(kern
.globals.gVirtBase
) + unsigned(kern
.globals.gPhysBase
) - unsigned(kern
.globals.vm_first_phys
)) / page_size
976 pvh
= kern
.globals.pv_head_table
[pn
]
978 if pvh_type
!= 0x3 and pvh_type
!= 0x0:
979 raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh
))
980 ptd
= kern
.GetValueFromAddress(pvh
& ~
0x3, 'pt_desc_t *')
981 print "descriptor: {:#x}".format(ptd
)
982 print "pmap: {:#x}".format(ptd
.pmap
)
983 pt_index
= (pte
% kern
.globals.page_size
) / page_size
984 pte_pgoff
= pte
% page_size
985 if kern
.arch
.startswith('arm64'):
986 pte_pgoff
= pte_pgoff
/ 8
987 nttes
= page_size
/ 8
989 pte_pgoff
= pte_pgoff
/ 4
990 nttes
= page_size
/ 4
991 if ptd
.pt_cnt
[pt_index
].refcnt
== 0x4000:
993 granule
= nttes
* page_size
997 print "maps VA: {:#x}".format(long(unsigned(ptd
.pt_map
[pt_index
].va
)) + (pte_pgoff
* granule
))
998 pteval
= long(unsigned(dereference(kern
.GetValueFromAddress(unsigned(pte
), 'pt_entry_t *'))))
999 print "value: {:#x}".format(pteval
)
1000 if kern
.arch
.startswith('arm64'):
1001 print "level: {:d}".format(level
)
1002 PmapDecodeTTEARM64(pteval
, level
)
1003 elif kern
.arch
== 'arm':
1004 PmapDecodeTTEARM(pteval
, 2, vSCRIPT
)
@lldb_command('showpte')
def ShowPTE(cmd_args=None):
    """ Display vital information about the page table entry at VA <pte>
        Syntax: (lldb) showpte <pte_va>
    """
    if cmd_args == None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to showpte.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showpte does not support {0}".format(kern.arch))
    ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))
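# Illustrative invocation (placeholder PTE kernel virtual address):
#   (lldb) showpte 0xfffffff0100f8000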
def FindMappingAtLevelARM(pmap, tt, nttes, level, action):
    """ Perform the specified action for all valid mappings in an ARM translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 or 2
        action: callback for each valid TTE
    """
    for i in range(nttes):
        try:
            tte = tt[i]
            if level == 1:
                if tte & 0x3 == 0x1:
                    # L1 entry pointing to a coarse L2 table
                    type = 'table'
                    granule = 1024
                    paddr = tte & 0xFFFFFC00
                elif tte & 0x3 == 0x2:
                    type = 'block'
                    if (tte & 0x40000) == 0x40000:
                        # 16MB supersection
                        granule = 1 << 24
                        paddr = tte & 0xFF000000
                    else:
                        # 1MB section
                        granule = 1 << 20
                        paddr = tte & 0xFFF00000
                else:
                    continue
            elif (tte & 0x3) == 0x1:
                # 64KB large page
                type = 'entry'
                granule = 1 << 16
                paddr = tte & 0xFFFF0000
            elif (tte & 0x3) != 0:
                # 4KB small page
                type = 'entry'
                granule = 1 << 12
                paddr = tte & 0xFFFFF000
            else:
                continue
            action(pmap, level, type, addressof(tt[i]), paddr, granule)
            if level == 1 and (tte & 0x3) == 0x1:
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')
                FindMappingAtLevelARM(pmap, tt_next, granule / 4, level + 1, action)
        except Exception as exc:
            print "Unable to access tte {:#x}".format(unsigned(addressof(tt[i])))
def FindMappingAtLevelARM64(pmap, tt, nttes, level, action):
    """ Perform the specified action for all valid mappings in an ARM64 translation table
        pmap: owner of the translation table
        tt: translation table or page table
        nttes: number of entries in tt
        level: translation table level, 1 2 or 3
        action: callback for each valid TTE
    """
    page_size = kern.globals.arm_hardware_page_size
    page_offset_mask = (page_size - 1)
    page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
    for i in range(nttes):
        try:
            tte = tt[i]
            if tte & 0x1 == 0x1:
                if tte & 0x2 == 0x2:
                    # Table pointer (L1/L2) or page entry (L3)
                    if level < 3:
                        type = 'table'
                    else:
                        type = 'entry'
                    granule = page_size
                    paddr = tte & page_base_mask
                else:
                    # Block entry
                    type = 'block'
                    granule = PmapBlockOffsetMaskARM64(level) + 1
                    paddr = tte & PmapBlockBaseMaskARM64(level)
            else:
                continue
            action(pmap, level, type, addressof(tt[i]), paddr, granule)
            if level < 3 and (tte & 0x2 == 0x2):
                tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *')
                FindMappingAtLevelARM64(pmap, tt_next, granule / ARM64_TTE_SIZE, level + 1, action)
        except Exception as exc:
            print "Unable to access tte {:#x}".format(unsigned(addressof(tt[i])))
def ScanPageTables(action, targetPmap=None):
    """ Perform the specified action for all valid mappings in all page tables,
        optionally restricted to a single pmap.
        targetPmap: pmap whose page tables should be scanned. If None, all pmaps on the system will be scanned.
    """
    print "Scanning all available translation tables. This may take a long time..."
    def ScanPmap(pmap, action):
        if kern.arch.startswith('arm64'):
            granule = kern.globals.arm64_root_pgtable_num_ttes * 8
        elif kern.arch == 'arm':
            granule = pmap.tte_index_max * 4
        action(pmap, 1, 'root', pmap.tte, unsigned(pmap.ttep), granule)
        if kern.arch.startswith('arm64'):
            FindMappingAtLevelARM64(pmap, pmap.tte, kern.globals.arm64_root_pgtable_num_ttes, kern.globals.arm64_root_pgtable_level, action)
        elif kern.arch == 'arm':
            FindMappingAtLevelARM(pmap, pmap.tte, pmap.tte_index_max, 1, action)

    if targetPmap is not None:
        ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action)
    else:
        for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
            ScanPmap(pmap, action)
@lldb_command('showallmappings')
def ShowAllMappings(cmd_args=None):
    """ Find and display all available mappings on the system for
        <physical_address>. Optionally only searches the pmap
        specified by [<pmap>]
        Syntax: (lldb) showallmappings <physical_address> [<pmap>]
        WARNING: this macro can take a long time (up to 30min.) to complete!
    """
    if cmd_args == None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to showallmappings.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("showallmappings does not support {0}".format(kern.arch))
    pa = kern.GetValueFromAddress(cmd_args[0], 'unsigned long')
    targetPmap = None
    if len(cmd_args) > 1:
        targetPmap = cmd_args[1]
    def printMatchedMapping(pmap, level, type, tte, paddr, granule):
        if paddr <= pa < (paddr + granule):
            print "pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x})".format(pmap, level, type, unsigned(tte), paddr, paddr + granule)
    ScanPageTables(printMatchedMapping, targetPmap)
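# Illustrative invocations (physical address and pmap pointer are placeholders):
#   (lldb) showallmappings 0x80000000
#   (lldb) showallmappings 0x80000000 0xffffff80145a1000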
def checkPVList(pmap, level, type, tte, paddr, granule):
    """ Checks an ARM physical-to-virtual mapping list for consistency errors.
        pmap: owner of the translation table
        level: translation table level. PV lists will only be checked for L2 (arm32) or L3 (arm64) tables.
        tte: KVA of PTE to check for presence in PV list. If None, presence check will be skipped.
        paddr: physical address whose PV list should be checked. Need not be page-aligned.
    """
    vm_first_phys = unsigned(kern.globals.vm_first_phys)
    vm_last_phys = unsigned(kern.globals.vm_last_phys)
    page_size = kern.globals.arm_hardware_page_size
    if kern.arch.startswith('arm64'):
        page_offset_mask = (page_size - 1)
        page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask)
        paddr = paddr & page_base_mask
        max_level = 3
    elif kern.arch == 'arm':
        page_base_mask = 0xFFFFF000
        paddr = paddr & page_base_mask
        max_level = 2

    if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys:
        return

    pn = (paddr - vm_first_phys) / page_size
    pvh = unsigned(kern.globals.pv_head_table[pn])
    pvh_type = pvh & 0x3
    if pmap is not None:
        pmap_str = "pmap: {:#x}: ".format(pmap)
    else:
        pmap_str = ''
    if tte is not None:
        tte_str = "pte {:#x} ({:#x}): ".format(unsigned(tte), paddr)
    else:
        tte_str = "paddr {:#x}: ".format(paddr)
    if pvh_type == 0 or pvh_type == 3:
        print "{:s}{:s}unexpected PVH type {:d}".format(pmap_str, tte_str, pvh_type)
    elif pvh_type == 2:
        # single-PTE mapping
        ptep = pvh & ~0x3
        if tte is not None and ptep != unsigned(tte):
            print "{:s}{:s}PVH mismatch ({:#x})".format(pmap_str, tte_str, ptep)
        try:
            pte = long(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
            if pte != paddr:
                print "{:s}{:s}PVH {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)
        except Exception as exc:
            print "{:s}{:s}Unable to read PVH {:#x}".format(pmap_str, tte_str, ptep)
    elif pvh_type == 1:
        # list of PV entries
        pvep = pvh & ~0x3
        tte_match = False
        while pvep != 0:
            pve = kern.GetValueFromAddress(pvep, "pv_entry_t *")
            pvep = unsigned(pve.pve_next) & ~0x1
            ptep = unsigned(pve.pve_ptep) & ~0x3
            if tte is not None and ptep == unsigned(tte):
                tte_match = True
            try:
                pte = long(unsigned(dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *')))) & page_base_mask
                if pte != paddr:
                    print "{:s}{:s}PVE {:#x} maps wrong page ({:#x}) ".format(pmap_str, tte_str, ptep, pte)
            except Exception as exc:
                print "{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep)
        if tte is not None and not tte_match:
            print "{:s}{:s}not found in PV list".format(pmap_str, tte_str)
@lldb_command('pv_check', 'P')
def PVCheck(cmd_args=None, cmd_options={}):
    """ Check the physical-to-virtual mapping for a given PTE or physical address
        Syntax: (lldb) pv_check <addr> [-P]
            -P : Interpret <addr> as a physical address rather than a PTE
    """
    if cmd_args == None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pv_check.")
    if kern.arch == 'arm':
        level = 2
    elif kern.arch.startswith('arm64'):
        level = 3
    else:
        raise NotImplementedError("pv_check does not support {0}".format(kern.arch))
    if "-P" in cmd_options:
        pte = None
        pa = long(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long")))
    else:
        pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *')
        pa = long(unsigned(dereference(pte)))
    checkPVList(None, level, None, pte, pa, None)
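# Illustrative invocations (addresses are placeholders):
#   (lldb) pv_check 0xfffffff0100f8000       # <addr> treated as a PTE kernel virtual address
#   (lldb) pv_check -P 0x80000000            # <addr> treated as a physical address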
@lldb_command('check_pmaps')
def CheckPmapIntegrity(cmd_args=None):
    """ Performs a system-wide integrity check of all PTEs and associated PV lists.
        Optionally only checks the pmap specified by [<pmap>]
        Syntax: (lldb) check_pmaps [<pmap>]
        WARNING: this macro can take a HUGE amount of time (several hours) if you do not
        specify [pmap] to limit it to a single pmap. It will also give false positives
        for kernel_pmap, as we do not create PV entries for static kernel mappings on ARM.
        Use of this macro without the [<pmap>] argument is heavily discouraged.
    """
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch))
    targetPmap = None
    if len(cmd_args) > 0:
        targetPmap = cmd_args[0]
    ScanPageTables(checkPVList, targetPmap)
@lldb_command('pmapsforledger')
def PmapsForLedger(cmd_args=None):
    """ Find and display all pmaps currently using <ledger>.
        Syntax: (lldb) pmapsforledger <ledger>
    """
    if cmd_args == None or len(cmd_args) < 1:
        raise ArgumentError("Too few arguments to pmapsforledger.")
    if not kern.arch.startswith('arm'):
        raise NotImplementedError("pmapsforledger does not support {0}".format(kern.arch))
    ledger = kern.GetValueFromAddress(cmd_args[0], 'ledger_t')
    for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'):
        if pmap.ledger == ledger:
            print "pmap: {:#x}".format(pmap)
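# Illustrative invocation (placeholder ledger pointer):
#   (lldb) pmapsforledger 0xffffff8014223000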