/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management definitions.
 */
#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>

#ifdef XNU_KERNEL_PRIVATE

#include <kern/locks.h>
__options_decl(kma_flags_t, uint32_t, {
	KMA_NONE        = 0x00000000,
	KMA_HERE        = 0x00000001,
	KMA_NOPAGEWAIT  = 0x00000002,
	KMA_KOBJECT     = 0x00000004,
	KMA_LOMEM       = 0x00000008,
	KMA_GUARD_FIRST = 0x00000010,
	KMA_GUARD_LAST  = 0x00000020,
	KMA_PERMANENT   = 0x00000040,
	KMA_NOENCRYPT   = 0x00000080,
	KMA_KSTACK      = 0x00000100,
	KMA_VAONLY      = 0x00000200,
	/*
	 * Pages belonging to the compressor are not on the paging queues,
	 * nor are they counted as wired.
	 */
	KMA_COMPRESSOR  = 0x00000400,
	KMA_ATOMIC      = 0x00000800,
	KMA_ZERO        = 0x00001000,
	KMA_PAGEABLE    = 0x00002000,
	KMA_KHEAP       = 0x00004000, /* Pages belonging to zones backing one of kalloc_heap. */
});
extern kern_return_t kernel_memory_allocate(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_offset_t  mask,
	kma_flags_t  flags,
	vm_tag_t     tag);
extern kern_return_t kmem_alloc(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_tag_t     tag) __XNU_INTERNAL(kmem_alloc);
extern kern_return_t kmem_alloc_contig(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_offset_t  mask,
	ppnum_t      max_pnum,
	ppnum_t      pnum_mask,
	kma_flags_t  flags,
	vm_tag_t     tag);
extern kern_return_t kmem_alloc_flags(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_tag_t     tag,
	kma_flags_t  flags);
extern kern_return_t kmem_alloc_pageable(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_tag_t     tag) __XNU_INTERNAL(kmem_alloc_pageable);
extern kern_return_t kmem_alloc_aligned(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_tag_t     tag);
extern kern_return_t kmem_realloc(
	vm_map_t     map,
	vm_offset_t  oldaddr,
	vm_size_t    oldsize,
	vm_offset_t  *newaddrp,
	vm_size_t    newsize,
	vm_tag_t     tag);
extern void kmem_free(
	vm_map_t     map,
	vm_offset_t  addr,
	vm_size_t    size);
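/*
 * Usage sketch (illustrative only, not part of the original header): a
 * wired, zero-filled kernel allocation followed by its release.  The map,
 * size, and tag values are placeholders; any VM_KERN_MEMORY_* tag from
 * <mach/vm_statistics.h> can be used for accounting.
 *
 *	vm_offset_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &addr, PAGE_SIZE, 0,
 *	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_OSFMK);
 *	if (kr == KERN_SUCCESS) {
 *		// the page is wired, zero-filled, and mapped at addr
 *		kmem_free(kernel_map, addr, PAGE_SIZE);
 *	}
 */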
extern kern_return_t kmem_suballoc(
	vm_map_t               parent,
	vm_offset_t            *addr,
	vm_size_t              size,
	boolean_t              pageable,
	int                    flags,
	vm_map_kernel_flags_t  vmk_flags,
	vm_tag_t               tag,
	vm_map_t               *new_map);
extern kern_return_t kmem_alloc_kobject(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size,
	vm_tag_t     tag) __XNU_INTERNAL(kmem_alloc_kobject);
extern void kernel_memory_populate_with_pages(
	vm_map_t        map,
	vm_offset_t     addr,
	vm_size_t       size,
	struct vm_page  *page_list,
	kma_flags_t     flags,
	vm_tag_t        tag);
extern kern_return_t kernel_memory_populate(
	vm_map_t     map,
	vm_offset_t  addr,
	vm_size_t    size,
	kma_flags_t  flags,
	vm_tag_t     tag);
extern void kernel_memory_depopulate(
	vm_map_t     map,
	vm_offset_t  addr,
	vm_size_t    size,
	kma_flags_t  flags,
	vm_tag_t     tag);
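/*
 * Usage sketch (illustrative only): reserving virtual space with KMA_VAONLY
 * and populating it with physical pages on demand.  The sizes, flags, and
 * tag are placeholders, and the calls assume the prototypes as reconstructed
 * in this excerpt.
 *
 *	vm_offset_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &addr, 4 * PAGE_SIZE, 0,
 *	    KMA_VAONLY | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
 *	if (kr == KERN_SUCCESS) {
 *		kr = kernel_memory_populate(kernel_map, addr, PAGE_SIZE,
 *		    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);
 *	}
 *	if (kr == KERN_SUCCESS) {
 *		kernel_memory_depopulate(kernel_map, addr, PAGE_SIZE,
 *		    KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
 *		kmem_free(kernel_map, addr, 4 * PAGE_SIZE);
 *	}
 */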
extern kern_return_t memory_object_iopl_request(
	ipc_port_t              port,
	memory_object_offset_t  offset,
	upl_size_t              *upl_size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);
struct mach_memory_info;
extern kern_return_t vm_page_diagnose(
	struct mach_memory_info  *info,
	unsigned int             num_info,
	uint64_t                 zones_collectable_bytes);
extern uint32_t vm_page_diagnose_estimate(void);
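/*
 * Usage sketch (illustrative only): sizing and filling a diagnostics buffer.
 * It assumes the full definition of struct mach_memory_info (from the
 * mach_debug headers) is visible, and uses kmem_alloc() purely as an example
 * backing store.
 *
 *	unsigned int num = vm_page_diagnose_estimate();
 *	vm_size_t    bytes = num * sizeof(struct mach_memory_info);
 *	vm_offset_t  buf = 0;
 *
 *	if (kmem_alloc(kernel_map, &buf, bytes, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS) {
 *		vm_page_diagnose((struct mach_memory_info *)buf, num, 0);
 *		kmem_free(kernel_map, buf, bytes);
 *	}
 */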
#if DEBUG || DEVELOPMENT

extern kern_return_t mach_memory_info_check(void);

extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t *size, vm_tag_t *tag, vm_size_t *zone_size);

#endif /* DEBUG || DEVELOPMENT */
#if HIBERNATION
extern void hibernate_rebuild_vm_structs(void);
#endif /* HIBERNATION */
extern vm_tag_t vm_tag_bt(void);

extern vm_tag_t vm_tag_alloc(vm_allocation_site_t *site);

extern void vm_tag_alloc_locked(vm_allocation_site_t *site, vm_allocation_site_t **releasesiteP);

extern void vm_tag_update_size(vm_tag_t tag, int64_t size);
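/*
 * Usage sketch (illustrative only): adjusting the accounting for a tag when
 * a tagged allocation grows or shrinks by one page.  The tag value is a
 * placeholder.
 *
 *	vm_tag_update_size(VM_KERN_MEMORY_KEXT, (int64_t)PAGE_SIZE);
 *	vm_tag_update_size(VM_KERN_MEMORY_KEXT, -(int64_t)PAGE_SIZE);
 */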
#if VM_MAX_TAG_ZONES

extern void vm_allocation_zones_init(void);
extern vm_tag_t vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags);
extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta);

#endif /* VM_MAX_TAG_ZONES */
extern vm_tag_t vm_tag_bt_debug(void);

extern uint32_t vm_tag_get_kext(vm_tag_t tag, char *name, vm_size_t namelen);

extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);

extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t *pvincr, uintptr_t *pvphysaddr);
#else /* XNU_KERNEL_PRIVATE */
extern kern_return_t kmem_alloc(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size);

extern kern_return_t kmem_alloc_pageable(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size);

extern kern_return_t kmem_alloc_kobject(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size);

extern void kmem_free(
	vm_map_t     map,
	vm_offset_t  addr,
	vm_size_t    size);
#endif /* !XNU_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
typedef struct vm_allocation_site kern_allocation_name;
typedef kern_allocation_name *kern_allocation_name_t;
#else /* XNU_KERNEL_PRIVATE */
struct kern_allocation_name;
typedef struct kern_allocation_name *kern_allocation_name_t;
#endif /* !XNU_KERNEL_PRIVATE */
extern kern_allocation_name_t kern_allocation_name_allocate(const char *name, uint16_t suballocs);
extern void kern_allocation_name_release(kern_allocation_name_t allocation);
extern const char *kern_allocation_get_name(kern_allocation_name_t allocation);
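/*
 * Usage sketch (illustrative only): naming a long-lived allocation so it can
 * be identified in memory diagnostics.  The name string and the suballocation
 * count are placeholders.
 *
 *	kern_allocation_name_t name;
 *
 *	name = kern_allocation_name_allocate("com.example.mydriver", 0);
 *	printf("tracking %s\n", kern_allocation_get_name(name));
 *	kern_allocation_name_release(name);
 */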
#ifdef XNU_KERNEL_PRIVATE
extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta);
extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta);
extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation);
#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE

extern void kmem_init(
	vm_offset_t  start,
	vm_offset_t  end);
extern kern_return_t copyinmap(
	vm_map_t         map,
	vm_map_offset_t  fromaddr,
	void             *todata,
	vm_size_t        length);
extern kern_return_t copyoutmap(
	vm_map_t         map,
	void             *fromdata,
	vm_map_offset_t  toaddr,
	vm_size_t        length);
extern kern_return_t copyoutmap_atomic32(
	vm_map_t         map,
	uint32_t         value,
	vm_map_offset_t  toaddr);
extern kern_return_t copyoutmap_atomic64(
	vm_map_t         map,
	uint64_t         value,
	vm_map_offset_t  toaddr);
extern kern_return_t kmem_alloc_external(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size);

extern kern_return_t kmem_alloc_kobject_external(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size);

extern kern_return_t kmem_alloc_pageable_external(
	vm_map_t     map,
	vm_offset_t  *addrp,
	vm_size_t    size);

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t mach_vm_allocate_kernel(
	vm_map_t          map,
	mach_vm_offset_t  *addr,
	mach_vm_size_t    size,
	int               flags,
	vm_tag_t          tag);
extern kern_return_t vm_allocate_kernel(
	vm_map_t     map,
	vm_offset_t  *addr,
	vm_size_t    size,
	int          flags,
	vm_tag_t     tag);
extern kern_return_t mach_vm_map_kernel(
	vm_map_t               target_map,
	mach_vm_offset_t       *address,
	mach_vm_size_t         initial_size,
	mach_vm_offset_t       mask,
	int                    flags,
	vm_map_kernel_flags_t  vmk_flags,
	vm_tag_t               tag,
	ipc_port_t             port,
	vm_object_offset_t     offset,
	boolean_t              copy,
	vm_prot_t              cur_protection,
	vm_prot_t              max_protection,
	vm_inherit_t           inheritance);
extern kern_return_t vm_map_kernel(
	vm_map_t               target_map,
	vm_offset_t            *address,
	vm_size_t              size,
	vm_offset_t            mask,
	int                    flags,
	vm_map_kernel_flags_t  vmk_flags,
	vm_tag_t               tag,
	ipc_port_t             port,
	vm_offset_t            offset,
	boolean_t              copy,
	vm_prot_t              cur_protection,
	vm_prot_t              max_protection,
	vm_inherit_t           inheritance);
extern kern_return_t mach_vm_remap_kernel(
	vm_map_t          target_map,
	mach_vm_offset_t  *address,
	mach_vm_size_t    size,
	mach_vm_offset_t  mask,
	int               flags,
	vm_tag_t          tag,
	vm_map_t          src_map,
	mach_vm_offset_t  memory_address,
	boolean_t         copy,
	vm_prot_t         *cur_protection,
	vm_prot_t         *max_protection,
	vm_inherit_t      inheritance);
extern kern_return_t vm_remap_kernel(
	vm_map_t      target_map,
	vm_offset_t   *address,
	vm_size_t     size,
	vm_offset_t   mask,
	int           flags,
	vm_tag_t      tag,
	vm_map_t      src_map,
	vm_offset_t   memory_address,
	boolean_t     copy,
	vm_prot_t     *cur_protection,
	vm_prot_t     *max_protection,
	vm_inherit_t  inheritance);
extern kern_return_t vm_map_64_kernel(
	vm_map_t               target_map,
	vm_offset_t            *address,
	vm_size_t              size,
	vm_offset_t            mask,
	int                    flags,
	vm_map_kernel_flags_t  vmk_flags,
	vm_tag_t               tag,
	ipc_port_t             port,
	vm_object_offset_t     offset,
	boolean_t              copy,
	vm_prot_t              cur_protection,
	vm_prot_t              max_protection,
	vm_inherit_t           inheritance);
extern kern_return_t mach_vm_wire_kernel(
	host_priv_t       host_priv,
	vm_map_t          map,
	mach_vm_offset_t  start,
	mach_vm_size_t    size,
	vm_prot_t         access,
	vm_tag_t          tag);
extern kern_return_t vm_map_wire_kernel(
	vm_map_t         map,
	vm_map_offset_t  start,
	vm_map_offset_t  end,
	vm_prot_t        caller_prot,
	vm_tag_t         tag,
	boolean_t        user_wire);
extern kern_return_t vm_map_wire_and_extract_kernel(
	vm_map_t         map,
	vm_map_offset_t  start,
	vm_prot_t        caller_prot,
	vm_tag_t         tag,
	boolean_t        user_wire,
	ppnum_t          *physpage_p);
#endif /* XNU_KERNEL_PRIVATE */
extern vm_map_t kernel_map;
extern vm_map_t kernel_pageable_map;
extern vm_map_t ipc_kernel_map;
extern vm_map_t g_kext_map;

#endif /* KERNEL_PRIVATE */
#if MACH_KERNEL_PRIVATE
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr)
__XNU_INTERNAL(vm_kernel_addrhash);
#else
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
#endif
extern void vm_kernel_addrhide(
	vm_offset_t  addr,
	vm_offset_t  *hide_addr);
extern vm_offset_t vm_kernel_addrperm_ext;
extern void vm_kernel_addrperm_external(
	vm_offset_t  addr,
	vm_offset_t  *perm_addr);
extern void vm_kernel_unslide_or_perm_external(
	vm_offset_t  addr,
	vm_offset_t  *up_addr);
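/*
 * Usage sketch (illustrative only): obfuscating a kernel pointer before it
 * is exposed outside the kernel, e.g. in a log message, so that neither the
 * kernel slide nor heap layout leaks.  some_object is a hypothetical pointer.
 *
 *	vm_offset_t safe_addr = 0;
 *
 *	vm_kernel_unslide_or_perm_external((vm_offset_t)some_object, &safe_addr);
 *	printf("object at %p\n", (void *)safe_addr);
 */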
#if MACH_KERNEL_PRIVATE
extern uint64_t vm_kernel_addrhash_salt;
extern uint64_t vm_kernel_addrhash_salt_ext;

extern void vm_kernel_addrhash_external(
	vm_offset_t  addr,
	vm_offset_t  *perm_addr);
#endif /* MACH_KERNEL_PRIVATE */
extern void vm_init_before_launchd(void);

#endif /* _VM_VM_KERN_H_ */