/*
 * Copyright (c) 2003-2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
57 #include <platforms.h>
60 #include <mach/i386/vm_param.h>
63 #include <mach/vm_param.h>
64 #include <mach/vm_prot.h>
65 #include <mach/machine.h>
66 #include <mach/time_value.h>
68 #include <kern/assert.h>
69 #include <kern/debug.h>
70 #include <kern/misc_protos.h>
71 #include <kern/cpu_data.h>
72 #include <kern/processor.h>
73 #include <vm/vm_page.h>
75 #include <vm/vm_kern.h>
76 #include <i386/pmap.h>
77 #include <i386/misc_protos.h>
78 #include <i386/cpuid.h>
79 #include <mach/thread_status.h>
80 #include <pexpert/i386/efi.h>
81 #include <i386/i386_lowmem.h>
82 #include <i386/lowglobals.h>
83 #include <i386/pal_routines.h>
85 #include <mach-o/loader.h>
86 #include <libkern/kernel_mach_header.h>
88 vm_size_t mem_size
= 0;
89 pmap_paddr_t first_avail
= 0;/* first after page tables */
91 uint64_t max_mem
; /* Size of physical memory (bytes), adjusted by maxmem */
93 uint64_t sane_size
= 0; /* Memory size to use for defaults calculations */
95 #define MAXLORESERVE (32 * 1024 * 1024)
97 ppnum_t max_ppnum
= 0;
98 ppnum_t lowest_lo
= 0;
99 ppnum_t lowest_hi
= 0;
100 ppnum_t highest_hi
= 0;
102 enum {PMAP_MAX_RESERVED_RANGES
= 32};
103 uint32_t pmap_reserved_pages_allocated
= 0;
104 uint32_t pmap_reserved_range_indices
[PMAP_MAX_RESERVED_RANGES
];
105 uint32_t pmap_last_reserved_range_index
= 0;
106 uint32_t pmap_reserved_ranges
= 0;
108 extern unsigned int bsd_mbuf_cluster_reserve(boolean_t
*);
110 pmap_paddr_t avail_start
, avail_end
;
111 vm_offset_t virtual_avail
, virtual_end
;
112 static pmap_paddr_t avail_remaining
;
113 vm_offset_t static_memory_end
= 0;
115 vm_offset_t sHIB
, eHIB
, stext
, etext
, sdata
, edata
, end
;
118 * _mh_execute_header is the mach_header for the currently executing kernel
120 void *sectTEXTB
; unsigned long sectSizeTEXT
;
121 void *sectDATAB
; unsigned long sectSizeDATA
;
122 void *sectOBJCB
; unsigned long sectSizeOBJC
;
123 void *sectLINKB
; unsigned long sectSizeLINK
;
124 void *sectPRELINKB
; unsigned long sectSizePRELINK
;
125 void *sectHIBB
; unsigned long sectSizeHIB
;
126 void *sectINITPTB
; unsigned long sectSizeINITPT
;
128 kernel_segment_command_t
*segTEXT
;
129 kernel_section_t
*cursectTEXT
, *lastsectTEXT
;
131 extern uint64_t firmware_Conventional_bytes
;
132 extern uint64_t firmware_RuntimeServices_bytes
;
133 extern uint64_t firmware_ACPIReclaim_bytes
;
134 extern uint64_t firmware_ACPINVS_bytes
;
135 extern uint64_t firmware_PalCode_bytes
;
136 extern uint64_t firmware_Reserved_bytes
;
137 extern uint64_t firmware_Unusable_bytes
;
138 extern uint64_t firmware_other_bytes
;
139 uint64_t firmware_MMIO_bytes
;
142 #define PRINT_PMAP_MEMORY_TABLE
145 * Basic VM initialization.
148 i386_vm_init(uint64_t maxmem
,
152 pmap_memory_region_t
*pmptr
;
153 pmap_memory_region_t
*prev_pmptr
;
154 EfiMemoryRange
*mptr
;
159 unsigned int safeboot
;
162 uint32_t maxloreserve
;
164 uint32_t mbuf_reserve
= 0;
165 boolean_t mbuf_override
= FALSE
;
166 boolean_t coalescing_permitted
;
168 kprintf("Boot args revision: %d version: %d",
169 args
->Revision
, args
->Version
);
170 kprintf(" commandline: \"");
171 for(i
=0; i
<BOOT_LINE_LENGTH
; i
++)
172 kprintf("%c", args
->CommandLine
[i
]);
177 * Now retrieve addresses for end, edata, and etext
178 * from MACH-O headers.
181 sectTEXTB
= (void *) getsegdatafromheader(
182 &_mh_execute_header
, "__TEXT", §SizeTEXT
);
183 sectDATAB
= (void *) getsegdatafromheader(
184 &_mh_execute_header
, "__DATA", §SizeDATA
);
185 sectOBJCB
= (void *) getsegdatafromheader(
186 &_mh_execute_header
, "__OBJC", §SizeOBJC
);
187 sectLINKB
= (void *) getsegdatafromheader(
188 &_mh_execute_header
, "__LINKEDIT", §SizeLINK
);
189 sectHIBB
= (void *)getsegdatafromheader(
190 &_mh_execute_header
, "__HIB", §SizeHIB
);
191 sectINITPTB
= (void *)getsegdatafromheader(
192 &_mh_execute_header
, "__INITPT", §SizeINITPT
);
193 sectPRELINKB
= (void *) getsegdatafromheader(
194 &_mh_execute_header
, "__PRELINK_TEXT", §SizePRELINK
);
196 segTEXT
= getsegbynamefromheader(&_mh_execute_header
, "__TEXT");
197 cursectTEXT
= lastsectTEXT
= firstsect(segTEXT
);
198 /* Discover the last TEXT section within the TEXT segment */
199 while ((cursectTEXT
= nextsect(segTEXT
, cursectTEXT
)) != NULL
) {
200 lastsectTEXT
= cursectTEXT
;
203 sHIB
= (vm_offset_t
) sectHIBB
;
204 eHIB
= (vm_offset_t
) sectHIBB
+ sectSizeHIB
;
205 /* Zero-padded from ehib to stext if text is 2M-aligned */
206 stext
= (vm_offset_t
) sectTEXTB
;
207 etext
= (vm_offset_t
) round_page_64(lastsectTEXT
->addr
+ lastsectTEXT
->size
);
208 /* Zero-padded from etext to sdata if text is 2M-aligned */
209 sdata
= (vm_offset_t
) sectDATAB
;
210 edata
= (vm_offset_t
) sectDATAB
+ sectSizeDATA
;
213 kprintf("sectTEXTB = %p\n", sectTEXTB
);
214 kprintf("sectDATAB = %p\n", sectDATAB
);
215 kprintf("sectOBJCB = %p\n", sectOBJCB
);
216 kprintf("sectLINKB = %p\n", sectLINKB
);
217 kprintf("sectHIBB = %p\n", sectHIBB
);
218 kprintf("sectPRELINKB = %p\n", sectPRELINKB
);
219 kprintf("eHIB = %p\n", (void *) eHIB
);
220 kprintf("stext = %p\n", (void *) stext
);
221 kprintf("etext = %p\n", (void *) etext
);
222 kprintf("sdata = %p\n", (void *) sdata
);
223 kprintf("edata = %p\n", (void *) edata
);
229 * Compute the memory size.
232 if ((1 == vm_himemory_mode
) || PE_parse_boot_argn("-x", &safeboot
, sizeof (safeboot
))) {
233 maxpg
= 1 << (32 - I386_PGSHIFT
);
237 pmptr
= pmap_memory_regions
;
239 pmap_memory_region_count
= pmap_memory_region_current
= 0;
240 fap
= (ppnum_t
) i386_btop(first_avail
);
242 mptr
= (EfiMemoryRange
*)ml_static_ptovirt((vm_offset_t
)args
->MemoryMap
);
243 if (args
->MemoryMapDescriptorSize
== 0)
244 panic("Invalid memory map descriptor size");
245 msize
= args
->MemoryMapDescriptorSize
;
246 mcount
= args
->MemoryMapSize
/ msize
;
248 #define FOURGIG 0x0000000100000000ULL
249 #define ONEGIG 0x0000000040000000ULL
251 for (i
= 0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
253 uint64_t region_bytes
= 0;
255 if (pmap_memory_region_count
>= PMAP_MEMORY_REGIONS_SIZE
) {
256 kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count
);
259 base
= (ppnum_t
) (mptr
->PhysicalStart
>> I386_PGSHIFT
);
260 top
= (ppnum_t
) (((mptr
->PhysicalStart
) >> I386_PGSHIFT
) + mptr
->NumberOfPages
- 1);
263 static uint32_t nmr
= 0;
264 if ((base
> 0x20000) && (nmr
++ < 4))
265 mptr
->Attribute
|= EFI_MEMORY_KERN_RESERVED
;
267 region_bytes
= (uint64_t)(mptr
->NumberOfPages
<< I386_PGSHIFT
);
268 pmap_type
= mptr
->Type
;
270 switch (mptr
->Type
) {
273 case kEfiBootServicesCode
:
274 case kEfiBootServicesData
:
275 case kEfiConventionalMemory
:
277 * Consolidate usable memory types into one.
279 pmap_type
= kEfiConventionalMemory
;
280 sane_size
+= region_bytes
;
281 firmware_Conventional_bytes
+= region_bytes
;
284 * sane_size should reflect the total amount of physical
285 * RAM in the system, not just the amount that is
286 * available for the OS to use.
287 * FIXME:Consider deriving this value from SMBIOS tables
288 * rather than reverse engineering the memory map.
290 * <rdar://problem/4642773> Memory map should
291 * describe all memory
292 * Firmware on some systems guarantees that the memory
293 * map is complete via the "RomReservedMemoryTracked"
294 * feature field--consult that where possible to
295 * avoid the "round up to 128M" workaround below.
298 case kEfiRuntimeServicesCode
:
299 case kEfiRuntimeServicesData
:
300 firmware_RuntimeServices_bytes
+= region_bytes
;
301 sane_size
+= region_bytes
;
303 case kEfiACPIReclaimMemory
:
304 firmware_ACPIReclaim_bytes
+= region_bytes
;
305 sane_size
+= region_bytes
;
307 case kEfiACPIMemoryNVS
:
308 firmware_ACPINVS_bytes
+= region_bytes
;
309 sane_size
+= region_bytes
;
312 firmware_PalCode_bytes
+= region_bytes
;
313 sane_size
+= region_bytes
;
316 case kEfiReservedMemoryType
:
317 firmware_Reserved_bytes
+= region_bytes
;
319 case kEfiUnusableMemory
:
320 firmware_Unusable_bytes
+= region_bytes
;
322 case kEfiMemoryMappedIO
:
323 case kEfiMemoryMappedIOPortSpace
:
324 firmware_MMIO_bytes
+= region_bytes
;
327 firmware_other_bytes
+= region_bytes
;
332 kprintf("EFI region %d: type %u/%d, base 0x%x, top 0x%x\n",
333 i
, mptr
->Type
, pmap_type
, base
, top
);
339 top
= (top
> maxpg
) ? maxpg
: top
;
345 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
||
346 pmap_type
!= kEfiConventionalMemory
) {
351 * Usable memory region
353 if (top
< I386_LOWMEM_RESERVED
||
354 !pal_is_usable_memory(base
, top
)) {
359 * A range may be marked with with the
360 * EFI_MEMORY_KERN_RESERVED attribute
361 * on some systems, to indicate that the range
362 * must not be made available to devices.
365 if (mptr
->Attribute
& EFI_MEMORY_KERN_RESERVED
) {
366 if (++pmap_reserved_ranges
> PMAP_MAX_RESERVED_RANGES
) {
367 panic("Too many reserved ranges %u\n", pmap_reserved_ranges
);
373 * entire range below first_avail
374 * salvage some low memory pages
375 * we use some very low memory at startup
376 * mark as already allocated here
378 if (base
>= I386_LOWMEM_RESERVED
)
381 pmptr
->base
= I386_LOWMEM_RESERVED
;
386 if ((mptr
->Attribute
& EFI_MEMORY_KERN_RESERVED
) &&
387 (top
< I386_KERNEL_IMAGE_BASE_PAGE
)) {
388 pmptr
->alloc
= pmptr
->base
;
389 pmap_reserved_range_indices
[pmap_last_reserved_range_index
++] = pmap_memory_region_count
;
393 * mark as already mapped
397 pmptr
->type
= pmap_type
;
398 pmptr
->attribute
= mptr
->Attribute
;
400 else if ( (base
< fap
) && (top
> fap
) ) {
403 * put mem below first avail in table but
404 * mark already allocated
407 pmptr
->alloc
= pmptr
->end
= (fap
- 1);
408 pmptr
->type
= pmap_type
;
409 pmptr
->attribute
= mptr
->Attribute
;
411 * we bump these here inline so the accounting
412 * below works correctly
415 pmap_memory_region_count
++;
417 pmptr
->alloc
= pmptr
->base
= fap
;
418 pmptr
->type
= pmap_type
;
419 pmptr
->attribute
= mptr
->Attribute
;
422 if (mptr
->Attribute
& EFI_MEMORY_KERN_RESERVED
)
423 pmap_reserved_range_indices
[pmap_last_reserved_range_index
++] = pmap_memory_region_count
;
426 * entire range useable
428 pmptr
->alloc
= pmptr
->base
= base
;
429 pmptr
->type
= pmap_type
;
430 pmptr
->attribute
= mptr
->Attribute
;
432 if (mptr
->Attribute
& EFI_MEMORY_KERN_RESERVED
)
433 pmap_reserved_range_indices
[pmap_last_reserved_range_index
++] = pmap_memory_region_count
;
436 if (i386_ptob(pmptr
->end
) > avail_end
)
437 avail_end
= i386_ptob(pmptr
->end
);
439 avail_remaining
+= (pmptr
->end
- pmptr
->base
);
440 coalescing_permitted
= (prev_pmptr
&& (pmptr
->attribute
== prev_pmptr
->attribute
) && ((pmptr
->attribute
& EFI_MEMORY_KERN_RESERVED
) == 0));
442 * Consolidate contiguous memory regions, if possible
445 (pmptr
->type
== prev_pmptr
->type
) &&
446 (coalescing_permitted
) &&
447 (pmptr
->base
== pmptr
->alloc
) &&
448 (pmptr
->base
== (prev_pmptr
->end
+ 1)))
450 if (prev_pmptr
->end
== prev_pmptr
->alloc
)
451 prev_pmptr
->alloc
= pmptr
->base
;
452 prev_pmptr
->end
= pmptr
->end
;
454 pmap_memory_region_count
++;
461 #ifdef PRINT_PMAP_MEMORY_TABLE
464 pmap_memory_region_t
*p
= pmap_memory_regions
;
465 addr64_t region_start
, region_end
;
466 addr64_t efi_start
, efi_end
;
467 for (j
=0;j
<pmap_memory_region_count
;j
++, p
++) {
468 kprintf("pmap region %d type %d base 0x%llx alloc 0x%llx top 0x%llx\n",
470 (addr64_t
) p
->base
<< I386_PGSHIFT
,
471 (addr64_t
) p
->alloc
<< I386_PGSHIFT
,
472 (addr64_t
) p
->end
<< I386_PGSHIFT
);
473 region_start
= (addr64_t
) p
->base
<< I386_PGSHIFT
;
474 region_end
= ((addr64_t
) p
->end
<< I386_PGSHIFT
) - 1;
475 mptr
= (EfiMemoryRange
*) ml_static_ptovirt((vm_offset_t
)args
->MemoryMap
);
476 for (i
=0; i
<mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
477 if (mptr
->Type
!= kEfiLoaderCode
&&
478 mptr
->Type
!= kEfiLoaderData
&&
479 mptr
->Type
!= kEfiBootServicesCode
&&
480 mptr
->Type
!= kEfiBootServicesData
&&
481 mptr
->Type
!= kEfiConventionalMemory
) {
482 efi_start
= (addr64_t
)mptr
->PhysicalStart
;
483 efi_end
= efi_start
+ ((vm_offset_t
)mptr
->NumberOfPages
<< I386_PGSHIFT
) - 1;
484 if ((efi_start
>= region_start
&& efi_start
<= region_end
) ||
485 (efi_end
>= region_start
&& efi_end
<= region_end
)) {
486 kprintf(" *** Overlapping region with EFI runtime region %d\n", i
);
494 avail_start
= first_avail
;
495 mem_actual
= sane_size
;
498 * For user visible memory size, round up to 128 Mb - accounting for the various stolen memory
499 * not reported by EFI.
502 sane_size
= (sane_size
+ 128 * MB
- 1) & ~((uint64_t)(128 * MB
- 1));
505 * We cap at KERNEL_MAXMEM bytes (currently 32GB for K32, 96GB for K64).
506 * Unless overriden by the maxmem= boot-arg
507 * -- which is a non-zero maxmem argument to this function.
509 if (maxmem
== 0 && sane_size
> KERNEL_MAXMEM
) {
510 maxmem
= KERNEL_MAXMEM
;
511 printf("Physical memory %lld bytes capped at %dGB\n",
512 sane_size
, (uint32_t) (KERNEL_MAXMEM
/GB
));
516 * if user set maxmem, reduce memory sizes
518 if ( (maxmem
> (uint64_t)first_avail
) && (maxmem
< sane_size
)) {
519 ppnum_t discarded_pages
= (ppnum_t
)((sane_size
- maxmem
) >> I386_PGSHIFT
);
520 ppnum_t highest_pn
= 0;
521 ppnum_t cur_alloc
= 0;
522 uint64_t pages_to_use
;
523 unsigned cur_region
= 0;
527 if (avail_remaining
> discarded_pages
)
528 avail_remaining
-= discarded_pages
;
532 pages_to_use
= avail_remaining
;
534 while (cur_region
< pmap_memory_region_count
&& pages_to_use
) {
535 for (cur_alloc
= pmap_memory_regions
[cur_region
].alloc
;
536 cur_alloc
< pmap_memory_regions
[cur_region
].end
&& pages_to_use
;
538 if (cur_alloc
> highest_pn
)
539 highest_pn
= cur_alloc
;
542 if (pages_to_use
== 0)
543 pmap_memory_regions
[cur_region
].end
= cur_alloc
;
547 pmap_memory_region_count
= cur_region
;
549 avail_end
= i386_ptob(highest_pn
+ 1);
553 * mem_size is only a 32 bit container... follow the PPC route
554 * and pin it to a 2 Gbyte maximum
556 if (sane_size
> (FOURGIG
>> 1))
557 mem_size
= (vm_size_t
)(FOURGIG
>> 1);
559 mem_size
= (vm_size_t
)sane_size
;
562 kprintf("Physical memory %llu MB\n", sane_size
/MB
);
564 max_valid_low_ppnum
= (2 * GB
) / PAGE_SIZE
;
566 if (!PE_parse_boot_argn("max_valid_dma_addr", &maxdmaaddr
, sizeof (maxdmaaddr
))) {
567 max_valid_dma_address
= (uint64_t)4 * (uint64_t)GB
;
569 max_valid_dma_address
= ((uint64_t) maxdmaaddr
) * MB
;
571 if ((max_valid_dma_address
/ PAGE_SIZE
) < max_valid_low_ppnum
)
572 max_valid_low_ppnum
= (ppnum_t
)(max_valid_dma_address
/ PAGE_SIZE
);
574 if (avail_end
>= max_valid_dma_address
) {
576 if (!PE_parse_boot_argn("maxloreserve", &maxloreserve
, sizeof (maxloreserve
))) {
578 if (sane_size
>= (ONEGIG
* 15))
579 maxloreserve
= (MAXLORESERVE
/ PAGE_SIZE
) * 4;
580 else if (sane_size
>= (ONEGIG
* 7))
581 maxloreserve
= (MAXLORESERVE
/ PAGE_SIZE
) * 2;
583 maxloreserve
= MAXLORESERVE
/ PAGE_SIZE
;
585 mbuf_reserve
= bsd_mbuf_cluster_reserve(&mbuf_override
) / PAGE_SIZE
;
587 maxloreserve
= (maxloreserve
* (1024 * 1024)) / PAGE_SIZE
;
590 vm_lopage_free_limit
= maxloreserve
;
592 if (mbuf_override
== TRUE
) {
593 vm_lopage_free_limit
+= mbuf_reserve
;
594 vm_lopage_lowater
= 0;
596 vm_lopage_lowater
= vm_lopage_free_limit
/ 16;
598 vm_lopage_refill
= TRUE
;
599 vm_lopage_needed
= TRUE
;
604 * Initialize kernel physical map.
605 * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
607 kprintf("avail_remaining = 0x%lx\n", (unsigned long)avail_remaining
);
608 pmap_bootstrap(0, IA32e
);
613 pmap_free_pages(void)
615 return (unsigned int)avail_remaining
;
619 boolean_t
pmap_next_page_reserved(ppnum_t
*);
622 * Pick a page from a "kernel private" reserved range; works around
623 * errata on some hardware.
626 pmap_next_page_reserved(ppnum_t
*pn
) {
627 if (pmap_reserved_ranges
) {
629 pmap_memory_region_t
*region
;
630 for (n
= 0; n
< pmap_last_reserved_range_index
; n
++) {
631 uint32_t reserved_index
= pmap_reserved_range_indices
[n
];
632 region
= &pmap_memory_regions
[reserved_index
];
633 if (region
->alloc
< region
->end
) {
634 *pn
= region
->alloc
++;
640 if (lowest_lo
== 0 || *pn
< lowest_lo
)
643 pmap_reserved_pages_allocated
++;
645 if (region
->alloc
== region
->end
) {
646 kprintf("Exhausted reserved range index: %u, base: 0x%x end: 0x%x, type: 0x%x, attribute: 0x%llx\n", reserved_index
, region
->base
, region
->end
, region
->type
, region
->attribute
);
661 pmap_memory_region_t
*region
;
664 if (pmap_next_page_reserved(pn
))
667 if (avail_remaining
) {
668 for (n
= pmap_memory_region_count
- 1; n
>= 0; n
--) {
669 region
= &pmap_memory_regions
[n
];
671 if (region
->alloc
!= region
->end
) {
672 *pn
= region
->alloc
++;
678 if (lowest_lo
== 0 || *pn
< lowest_lo
)
681 if (lowest_hi
== 0 || *pn
< lowest_hi
)
684 if (*pn
> highest_hi
)
699 if (avail_remaining
) while (pmap_memory_region_current
< pmap_memory_region_count
) {
700 if (pmap_memory_regions
[pmap_memory_region_current
].alloc
==
701 pmap_memory_regions
[pmap_memory_region_current
].end
) {
702 pmap_memory_region_current
++;
705 *pn
= pmap_memory_regions
[pmap_memory_region_current
].alloc
++;
711 if (lowest_lo
== 0 || *pn
< lowest_lo
)
725 pmap_memory_region_t
*pmptr
= pmap_memory_regions
;
727 for (i
= 0; i
< pmap_memory_region_count
; i
++, pmptr
++) {
728 if ( (pn
>= pmptr
->base
) && (pn
<= pmptr
->end
) )