/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Default pager.
 *	General definitions.
 */

#ifndef	_DEFAULT_PAGER_INTERNAL_H_
#define	_DEFAULT_PAGER_INTERNAL_H_

#include <default_pager/diag.h>
#include <default_pager/default_pager_types.h>
#include <mach/mach_types.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <device/device_types.h>

/*
 * Default option settings.
 */
#ifndef	PARALLEL
#define	PARALLEL 1
#endif

#ifndef	CHECKSUM
#define	CHECKSUM 0
#endif

#define	MACH_PORT_FACE	mach_port_t

#if 0
#ifndef	USE_PRECIOUS
#define	USE_PRECIOUS	TRUE
#endif
#endif

#ifdef	USER_PAGER
#define	UP(stuff)	stuff
#else	/* USER_PAGER */
#define	UP(stuff)
#endif	/* USER_PAGER */

#define	dprintf(args)					\
	do {						\
		printf("%s[KERNEL]: ", my_name);	\
		printf args;				\
	} while (0)

/*
 * Debug.
 */
__private_extern__ char my_name[];

#define DEFAULT_PAGER_DEBUG	0

#if DEFAULT_PAGER_DEBUG

extern int debug_mask;
#define DEBUG_MSG_EXTERNAL	0x00000001
#define DEBUG_MSG_INTERNAL	0x00000002
#define DEBUG_MO_EXTERNAL	0x00000100
#define DEBUG_MO_INTERNAL	0x00000200
#define DEBUG_VS_EXTERNAL	0x00010000
#define DEBUG_VS_INTERNAL	0x00020000
#define DEBUG_BS_EXTERNAL	0x01000000
#define DEBUG_BS_INTERNAL	0x02000000

#define DP_DEBUG(level, args)			\
	do {					\
		if (debug_mask & (level))	\
			dprintf(args);		\
	} while (0)

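/*
 * Illustrative use only (not compiled here): callers pass one of the
 * DEBUG_* mask bits plus a printf-style argument list wrapped in its own
 * parentheses, e.g.
 *
 *	DP_DEBUG(DEBUG_VS_INTERNAL,
 *		 ("cluster_read: vs 0x%p offset 0x%x\n", vs, offset));
 *
 * The extra parentheses let a variable number of arguments pass through
 * the single "args" macro parameter into dprintf() above.
 */
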
#ifndef MACH_KERNEL
#define ASSERT(expr)							\
	do {								\
		if (!(expr))						\
			panic("%s[%d]%s: assertion failed in %s line %d: %s", \
			      my_name, dp_thread_id(), here,		\
			      __FILE__, __LINE__, # expr);		\
	} while (0)
#else
#define ASSERT(expr)							\
	do {								\
		if (!(expr))						\
			panic("%s[KERNEL]: assertion failed in %s line %d: %s", \
			      my_name, __FILE__, __LINE__, # expr);	\
	} while (0)
#endif

#else /* DEFAULT_PAGER_DEBUG */

#define DP_DEBUG(level, args)	do {} while(0)
#define ASSERT(clause)		do {} while(0)

#endif /* DEFAULT_PAGER_DEBUG */

#ifndef MACH_KERNEL
extern char *mach_error_string(kern_return_t);
#endif

#define	PAGER_SUCCESS	0
#define	PAGER_FULL	1
#define	PAGER_ERROR	2

/*
 * VM and IPC globals.
 */
#ifdef MACH_KERNEL
#define	vm_page_size	PAGE_SIZE
#define	vm_page_mask	PAGE_MASK
#define	vm_page_shift	PAGE_SHIFT
#else
extern vm_object_size_t		vm_page_size;
extern unsigned long long	vm_page_mask;
extern int			vm_page_shift;
#endif

#ifndef MACH_KERNEL
#define ptoa(p)	((p)*vm_page_size)
#define atop(a)	((a)/vm_page_size)
#endif
#define howmany(a,b)	((((a) % (b)) == 0) ? ((a) / (b)) : (((a) / (b)) + 1))

extern memory_object_default_t default_pager_object;

#ifdef MACH_KERNEL
extern lck_mtx_t	dpt_lock;	/* Lock for the dpt array */
extern int		default_pager_internal_count;
extern MACH_PORT_FACE	default_pager_host_port;
/* extern task_t	default_pager_self; */	/* don't need or want */
extern MACH_PORT_FACE	default_pager_internal_set;
extern MACH_PORT_FACE	default_pager_external_set;
extern MACH_PORT_FACE	default_pager_default_set;
#else
extern mach_port_t	default_pager_host_port;
extern task_port_t	default_pager_self;
extern mach_port_t	default_pager_internal_set;
extern mach_port_t	default_pager_external_set;
extern mach_port_t	default_pager_default_set;
#endif

typedef vm32_offset_t	dp_offset_t;
typedef vm32_size_t	dp_size_t;
typedef vm32_address_t	dp_address_t;

typedef struct default_pager_thread {
#ifndef MACH_KERNEL
	cthread_t	dpt_thread;	/* Server thread. */
#endif
	vm_offset_t	dpt_buffer;	/* Read buffer. */
	boolean_t	dpt_internal;	/* Do we handle internal objects? */
#ifndef MACH_KERNEL
	int		dpt_id;		/* thread id for printf */
#else
	int		checked_out;
#endif
	boolean_t	dpt_initialized_p; /* Thread is ready for requests. */
} default_pager_thread_t;

#ifdef MACH_KERNEL
extern default_pager_thread_t	**dpt_array;
#endif

/*
 * Global statistics.
 */
struct global_stats {
	unsigned int	gs_pageout_calls;	/* # pageout calls */
	unsigned int	gs_pagein_calls;	/* # pagein calls */
	unsigned int	gs_pages_in;		/* # pages paged in (total) */
	unsigned int	gs_pages_out;		/* # pages paged out (total) */
	unsigned int	gs_pages_unavail;	/* # zero-fill pages */
	unsigned int	gs_pages_init;		/* # page init requests */
	unsigned int	gs_pages_init_writes;	/* # page init writes */
	VSTATS_LOCK_DECL(gs_lock)
};
extern struct global_stats global_stats;
#define GSTAT(clause)	VSTATS_ACTION(&global_stats.gs_lock, (clause))

/*
 * Cluster related definitions.
 * Clusters are sized in number of pages per cluster.
 * Cluster sizes must be powers of two.
 *
 * These numbers are related to the struct vs_map,
 * defined below.
 */
#define MAX_CLUSTER_SIZE	8
#define MAX_CLUSTER_SHIFT	3
#define NO_CLSIZE		0

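/*
 * For example, a backing store using the maximum cluster shift of
 * MAX_CLUSTER_SHIFT (3) groups 1 << 3 == MAX_CLUSTER_SIZE (8) pages into
 * each cluster, i.e. 32KB per cluster with 4KB pages.
 */
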
/*
 * bit map related macros
 */
#define NBBY		8	/* bits per byte XXX */
#define BYTEMASK	0xff
#define setbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))
#define clrbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))
#define isset(a,i)	(*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))
#define isclr(a,i)	((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)

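/*
 * Illustrative sketch only (not compiled here): a paging segment keeps one
 * bit per cluster in ps_bmap (see struct paging_segment below), sized with
 * RMAPSIZE() and manipulated with the macros above, roughly:
 *
 *	unsigned char *bmap = (unsigned char *) kalloc(RMAPSIZE(nclusters));
 *	memset(bmap, 0, RMAPSIZE(nclusters));
 *	setbit(bmap, i);		-- mark cluster i in use
 *	if (isclr(bmap, j)) ...		-- cluster j is still free
 *	clrbit(bmap, i);		-- release cluster i
 */
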
/*
 * Default Pager.
 *	Backing Store Management.
 */

#define	BS_MAXPRI	4
#define	BS_MINPRI	0
#define	BS_NOPRI	-1
#define	BS_FULLPRI	-2

/*
 * Quick way to access the emergency segment backing store structures
 * without a full-blown search.
 */
extern MACH_PORT_FACE	emergency_segment_backing_store;

/*
 * Mapping between backing store port and backing store object.
 */
struct backing_store {
	queue_chain_t	bs_links;	/* link in backing_store_list */
	lck_mtx_t	bs_lock;	/* lock for the structure */
	MACH_PORT_FACE	bs_port;	/* backing store port */
	int		bs_priority;
	int		bs_clsize;	/* cluster size in pages */

	/* statistics */
	unsigned int	bs_pages_free;		/* # unallocated pages */
	unsigned int	bs_pages_total;		/* # pages (total) */
	unsigned int	bs_pages_in;		/* # page read requests */
	unsigned int	bs_pages_in_fail;	/* # page read errors */
	unsigned int	bs_pages_out;		/* # page write requests */
	unsigned int	bs_pages_out_fail;	/* # page write errors */
};
typedef struct backing_store *backing_store_t;
#define	BACKING_STORE_NULL	((backing_store_t) 0)
#define	BS_STAT(bs, clause)	VSTATS_ACTION(&(bs)->bs_lock, (clause))

#ifdef MACH_KERNEL
#define BS_LOCK_INIT(bs)	lck_mtx_init(&(bs)->bs_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define BS_LOCK(bs)		lck_mtx_lock(&(bs)->bs_lock)
#define BS_UNLOCK(bs)		lck_mtx_unlock(&(bs)->bs_lock)
#endif

struct backing_store_list_head {
	queue_head_t	bsl_queue;
	lck_mtx_t	bsl_lock;
};
extern struct backing_store_list_head	backing_store_list;
extern int				backing_store_release_trigger_disable;

#define BSL_LOCK_INIT()	lck_mtx_init(&backing_store_list.bsl_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define BSL_LOCK()	lck_mtx_lock(&backing_store_list.bsl_lock)
#define BSL_UNLOCK()	lck_mtx_unlock(&backing_store_list.bsl_lock)

/*
 * Paging segment management.
 * Controls allocation of blocks within paging area.
 */
struct paging_segment {
	/* device management */
	union {
		MACH_PORT_FACE	dev;	/* Port to device */
		struct vnode	*vnode;	/* vnode for bs file */
	} storage_type;
	unsigned int	ps_segtype;	/* file type or partition */
	MACH_PORT_FACE	ps_device;	/* Port to device */
	dp_offset_t	ps_offset;	/* Offset of segment within device */
	dp_offset_t	ps_recnum;	/* Number of device records in segment*/
	unsigned int	ps_pgnum;	/* Number of pages in segment */
	unsigned int	ps_record_shift;/* Bit shift: pages to device records */

	/* clusters and pages */
	unsigned int	ps_clshift;	/* Bit shift: clusters to pages */
	unsigned int	ps_ncls;	/* Number of clusters in segment */
	unsigned int	ps_clcount;	/* Number of free clusters */
	unsigned int	ps_pgcount;	/* Number of free pages */
	unsigned int	ps_hint;	/* Hint of where to look next. */
	unsigned int	ps_special_clusters; /* Clusters that might come in while we've
					      * released the locks doing a ps_delete.
					      */

	/* bitmap */
	lck_mtx_t	ps_lock;	/* Lock for contents of struct */
	unsigned char	*ps_bmap;	/* Map of used clusters */

	/* backing store */
	backing_store_t	ps_bs;		/* Backing store segment belongs to */
#define PS_CAN_USE		0x1
#define PS_GOING_AWAY		0x2
#define PS_EMERGENCY_SEGMENT	0x4
	unsigned int	ps_state;
};

#define IS_PS_OK_TO_USE(ps)		((ps->ps_state & PS_CAN_USE) == PS_CAN_USE)
#define IS_PS_GOING_AWAY(ps)		((ps->ps_state & PS_GOING_AWAY) == PS_GOING_AWAY)
#define IS_PS_EMERGENCY_SEGMENT(ps)	((ps->ps_state & PS_EMERGENCY_SEGMENT) == PS_EMERGENCY_SEGMENT)

#define ps_vnode	storage_type.vnode
#define ps_device	storage_type.dev
#define	PS_PARTITION	1
#define	PS_FILE		2

typedef struct paging_segment *paging_segment_t;

#define	PAGING_SEGMENT_NULL	((paging_segment_t) 0)

#define PS_LOCK_INIT(ps)	lck_mtx_init(&(ps)->ps_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define PS_LOCK(ps)		lck_mtx_lock(&(ps)->ps_lock)
#define PS_UNLOCK(ps)		lck_mtx_unlock(&(ps)->ps_lock)

typedef unsigned int	pseg_index_t;

#define	INVALID_PSEG_INDEX	((pseg_index_t)-1)
#define	EMERGENCY_PSEG_INDEX	((pseg_index_t) 0)
/*
 * MAX_PSEG_INDEX value is related to struct vs_map below.
 * "0" is reserved for empty map entries (no segment).
 */
#define MAX_PSEG_INDEX		63	/* 0 is reserved for empty map */
#define MAX_NUM_PAGING_SEGMENTS	MAX_PSEG_INDEX

/* paging segments array */
extern paging_segment_t	paging_segments[MAX_NUM_PAGING_SEGMENTS];
extern lck_mtx_t	paging_segments_lock;
extern int		paging_segment_count;	/* number of active paging segments */
extern int		paging_segment_max;	/* highest used paging segment index */
extern int		ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1];

#define PSL_LOCK_INIT()	lck_mtx_init(&paging_segments_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define PSL_LOCK()	lck_mtx_lock(&paging_segments_lock)
#define PSL_UNLOCK()	lck_mtx_unlock(&paging_segments_lock)

/*
 * Vstruct manipulation.  The vstruct is the pager's internal
 * representation of vm objects it manages.  There is one vstruct allocated
 * per vm object.
 *
 * The following data structures are defined for vstruct and vm object
 * management.
 */

/*
 * vs_map
 * A structure used only for temporary objects.  It is the element
 * contained in the vs_clmap structure, which contains information
 * about which clusters and pages in an object are present on backing
 * store (a paging file).
 * Note that this structure and its associated constants may change
 * with minimal impact on code.  The only function which knows the
 * internals of this structure is ps_clmap().
 *
 * If it is necessary to change the maximum number of paging segments
 * or pages in a cluster, then this structure is the one most
 * affected.  The constants and structures which *may* change are:
 *	MAX_CLUSTER_SIZE
 *	MAX_CLUSTER_SHIFT
 *	MAX_NUM_PAGING_SEGMENTS
 *	VSTRUCT_DEF_CLSHIFT
 *	struct vs_map and associated macros and constants (VSM_*)
 *	(only the macro definitions need change, the exported (inside the
 *	pager only) interfaces remain the same; the constants are for
 *	internal vs_map manipulation only).
 *	struct clbmap (below).
 */
struct vs_map {
	unsigned int	vsmap_entry:23,		/* offset in paging segment */
			vsmap_psindex:8,	/* paging segment */
			vsmap_error:1,
			vsmap_bmap:16,
			vsmap_alloc:16;
};

typedef struct vs_map *vs_map_t;


#define VSM_ENTRY_NULL	0x7fffff

/*
 * Exported macros for manipulating the vs_map structure --
 * checking status, getting and setting bits.
 */
#define	VSCLSIZE(vs)	(1U << (vs)->vs_clshift)
#define	VSM_ISCLR(vsm)	(((vsm).vsmap_entry == VSM_ENTRY_NULL) && \
			 ((vsm).vsmap_error == 0))
#define	VSM_ISERR(vsm)	((vsm).vsmap_error)
#define	VSM_SETCLOFF(vsm, val)	((vsm).vsmap_entry = (val))
#define	VSM_SETERR(vsm, err)	((vsm).vsmap_error = 1, \
				 (vsm).vsmap_entry = (err))
#define	VSM_GETERR(vsm)		((vsm).vsmap_entry)
#define	VSM_SETPG(vsm, page)	((vsm).vsmap_bmap |= (1 << (page)))
#define	VSM_CLRPG(vsm, page)	((vsm).vsmap_bmap &= ~(1 << (page)))
#define	VSM_SETPS(vsm, psindx)	((vsm).vsmap_psindex = (psindx))
#define	VSM_PSINDEX(vsm)	((vsm).vsmap_psindex)
#define	VSM_PS(vsm)		paging_segments[(vsm).vsmap_psindex]
#define	VSM_BMAP(vsm)		((vsm).vsmap_bmap)
#define	VSM_CLOFF(vsm)		((vsm).vsmap_entry)
#define	VSM_CLR(vsm)		((vsm).vsmap_entry = VSM_ENTRY_NULL, \
				 (vsm).vsmap_psindex = 0, \
				 (vsm).vsmap_error = 0, \
				 (vsm).vsmap_bmap = 0, \
				 (vsm).vsmap_alloc = 0)
#define	VSM_ALLOC(vsm)		((vsm).vsmap_alloc)
#define	VSM_SETALLOC(vsm, page)	((vsm).vsmap_alloc |= (1 << (page)))
#define	VSM_CLRALLOC(vsm, page)	((vsm).vsmap_alloc &= ~(1 << (page)))

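/*
 * Illustrative sketch only (not compiled here): given a vs_map entry "vsm"
 * describing one cluster, ps_clmap()-style code inspects and updates it
 * roughly as follows:
 *
 *	if (VSM_ISCLR(vsm)) {
 *		-- no backing store assigned to this cluster yet
 *	} else if (VSM_ISERR(vsm)) {
 *		kern_return_t error = VSM_GETERR(vsm);
 *	} else {
 *		paging_segment_t ps = VSM_PS(vsm);	-- segment holding it
 *		if (VSM_BMAP(vsm) & (1 << page))	-- page present on disk
 *			...
 *		VSM_SETPG(vsm, page);			-- mark page written
 *	}
 */
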
/*
 * Constants and macros for dealing with vstruct maps,
 * which comprise vs_map structures, which
 * map vm objects to backing storage (paging files and clusters).
 */
#define CLMAP_THRESHOLD		512	/* bytes */
#define	CLMAP_ENTRIES		(CLMAP_THRESHOLD/(int)sizeof(struct vs_map))
#define	CLMAP_SIZE(ncls)	(ncls*(int)sizeof(struct vs_map))

#define	INDIRECT_CLMAP_ENTRIES(ncls)	(((ncls-1)/CLMAP_ENTRIES) + 1)
#define	INDIRECT_CLMAP_SIZE(ncls)	(INDIRECT_CLMAP_ENTRIES(ncls) * (int)sizeof(struct vs_map *))
#define INDIRECT_CLMAP(size)	(CLMAP_SIZE(size) > CLMAP_THRESHOLD)

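/*
 * With the bit-fields above packing struct vs_map into 8 bytes (two 32-bit
 * words), CLMAP_ENTRIES is 512/8 == 64: an object of up to 64 clusters fits
 * in one direct map, and INDIRECT_CLMAP() is true only beyond that.  For
 * example, INDIRECT_CLMAP_ENTRIES(200) == 4 second-level maps.
 */
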
#define	RMAPSIZE(blocks)	(howmany(blocks,NBBY))

#define	CL_FIND		1
#define	CL_ALLOC	2

/*
 * clmap
 *
 * A cluster map returned by ps_clmap.  It is an abstracted cluster of
 * pages.  It gives the caller information about the cluster
 * desired.  On read it tells the caller if a cluster is mapped, and if so,
 * which of its pages are valid.  It should not be referenced directly,
 * except by ps_clmap; macros should be used.  If the number of pages
 * in a cluster needs to be more than 32, then the struct clbmap must
 * become larger.
 */
struct clbmap {
	unsigned int	clb_map;
};

struct clmap {
	paging_segment_t	cl_ps;		/* paging segment backing cluster */
	int			cl_numpages;	/* number of valid pages */
	struct clbmap		cl_bmap;	/* map of pages in cluster */
	int			cl_error;	/* cluster error value */
	struct clbmap		cl_alloc;	/* map of allocated pages in cluster */
};

#define	CLMAP_ERROR(clm)	(clm).cl_error
#define	CLMAP_PS(clm)		(clm).cl_ps
#define	CLMAP_NPGS(clm)		(clm).cl_numpages
#define	CLMAP_ISSET(clm,i)	((1<<(i))&((clm).cl_bmap.clb_map))
#define	CLMAP_ALLOC(clm)	(clm).cl_alloc.clb_map
/*
 * Shift off unused bits in a partial cluster
 */
#define	CLMAP_SHIFT(clm,vs)	\
	(clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
#define	CLMAP_SHIFTALLOC(clm,vs)	\
	(clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)

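/*
 * Illustrative sketch only (not compiled here): a read path asks ps_clmap()
 * (declared near the end of this header) where a cluster lives and then
 * consults the returned map through the CLMAP_* macros, roughly:
 *
 *	struct clmap clmap;
 *	dp_offset_t dev_offset;
 *
 *	dev_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0);
 *	if (CLMAP_ERROR(clmap))
 *		-- an earlier write to this cluster failed
 *	else if (CLMAP_NPGS(clmap) > 0 && CLMAP_ISSET(clmap, page))
 *		-- this page is valid on CLMAP_PS(clmap)
 */
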
typedef struct vstruct_alias {
	memory_object_pager_ops_t	name;
	struct vstruct			*vs;
} vstruct_alias_t;

#define DPT_LOCK_INIT(lock)	lck_mtx_init(&(lock), &default_pager_lck_grp, &default_pager_lck_attr)
#define DPT_LOCK(lock)		lck_mtx_lock(&(lock))
#define DPT_UNLOCK(lock)	lck_mtx_unlock(&(lock))
#define DPT_SLEEP(lock, e, i)	lck_mtx_sleep(&(lock), LCK_SLEEP_DEFAULT, (event_t)(e), i)
#define VS_LOCK_TYPE		hw_lock_data_t
#define VS_LOCK_INIT(vs)	hw_lock_init(&(vs)->vs_lock)
#define VS_TRY_LOCK(vs)		(VS_LOCK(vs),TRUE)
#define VS_LOCK(vs)		hw_lock_lock(&(vs)->vs_lock)
#define VS_UNLOCK(vs)		hw_lock_unlock(&(vs)->vs_lock)
#define VS_MAP_LOCK_TYPE	lck_mtx_t
#define VS_MAP_LOCK_INIT(vs)	lck_mtx_init(&(vs)->vs_map_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define VS_MAP_LOCK(vs)		lck_mtx_lock(&(vs)->vs_map_lock)
#define VS_MAP_TRY_LOCK(vs)	lck_mtx_try_lock(&(vs)->vs_map_lock)
#define VS_MAP_UNLOCK(vs)	lck_mtx_unlock(&(vs)->vs_map_lock)


/*
 * VM Object Structure:  This is the structure used to manage
 * default pager object associations with their control counter-
 * parts (VM objects).
 *
 * The start of this structure MUST match a "struct memory_object".
 */
typedef struct vstruct {
	struct ipc_object_header	vs_pager_header; /* fake ip_kotype() */
	memory_object_pager_ops_t	vs_pager_ops;	/* == &default_pager_ops */
	memory_object_control_t		vs_control;	/* our mem obj control ref */
	VS_LOCK_TYPE			vs_lock;	/* data for the lock */

	/* JMM - Could combine these first two in a single pending count now */
	unsigned int		vs_next_seqno;	/* next sequence num to issue */
	unsigned int		vs_seqno;	/* Pager port sequence number */
	unsigned int		vs_readers;	/* Reads in progress */
	unsigned int		vs_writers;	/* Writes in progress */

	unsigned int
	/* boolean_t */	vs_waiting_seqno:1,	/* to wait on seqno */
	/* boolean_t */	vs_waiting_read:1,	/* waiting on reader? */
	/* boolean_t */	vs_waiting_write:1,	/* waiting on writer? */
	/* boolean_t */	vs_waiting_async:1,	/* waiting on async? */
	/* boolean_t */	vs_indirect:1,		/* map indirect? */
	/* boolean_t */	vs_xfer_pending:1;	/* xfer out of seg? */

	unsigned int		vs_async_pending;/* pending async write count */
	unsigned int		vs_errors;	/* Pageout error count */
	unsigned int		vs_references;	/* references */

	queue_chain_t		vs_links;	/* Link in pager-wide list */

	unsigned int		vs_clshift;	/* Bit shift: clusters->pages */
	unsigned int		vs_size;	/* Object size in clusters */
	lck_mtx_t		vs_map_lock;	/* to protect map below */
	union {
		struct vs_map	*vsu_dmap;	/* Direct map of clusters */
		struct vs_map	**vsu_imap;	/* Indirect map of clusters */
	} vs_un;
} *vstruct_t;

#define	vs_dmap	vs_un.vsu_dmap
#define	vs_imap	vs_un.vsu_imap

#define VSTRUCT_NULL	((vstruct_t) 0)

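/*
 * Illustrative sketch only (not compiled here): when vs_indirect is clear,
 * cluster cl of an object is described by vs->vs_dmap[cl]; when it is set,
 * the map is two-level and the entry is reached roughly as
 *
 *	struct vs_map *submap = vs->vs_imap[cl / CLMAP_ENTRIES];
 *	struct vs_map  vsmap  = submap[cl % CLMAP_ENTRIES];
 *
 * which is why INDIRECT_CLMAP_ENTRIES() above sizes the first level in
 * units of CLMAP_ENTRIES.
 */
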
__private_extern__ void vs_async_wait(vstruct_t);

#if PARALLEL
__private_extern__ void vs_lock(vstruct_t);
__private_extern__ void vs_unlock(vstruct_t);
__private_extern__ void vs_start_read(vstruct_t);
__private_extern__ void vs_finish_read(vstruct_t);
__private_extern__ void vs_wait_for_readers(vstruct_t);
__private_extern__ void vs_start_write(vstruct_t);
__private_extern__ void vs_finish_write(vstruct_t);
__private_extern__ void vs_wait_for_writers(vstruct_t);
__private_extern__ void vs_wait_for_sync_writers(vstruct_t);
#else	/* PARALLEL */
#define	vs_lock(vs)
#define	vs_unlock(vs)
#define	vs_start_read(vs)
#define	vs_wait_for_readers(vs)
#define	vs_finish_read(vs)
#define	vs_start_write(vs)
#define	vs_wait_for_writers(vs)
#define	vs_wait_for_sync_writers(vs)
#define	vs_finish_write(vs)
#endif	/* PARALLEL */

/*
 * Data structures and variables dealing with asynchronous
 * completion of paging operations.
 */
/*
 * vs_async
 * A structure passed to ps_write_device for asynchronous completions.
 * It contains enough information to complete the write and
 * inform the VM of its completion.
 */
struct vs_async {
	struct vs_async		*vsa_next;	/* pointer to next structure */
	vstruct_t		vsa_vs;		/* the vstruct for the object */
	vm_offset_t		vsa_addr;	/* the vaddr of the data moved */
	vm_offset_t		vsa_offset;	/* the object offset of the data */
	vm_size_t		vsa_size;	/* the number of bytes moved */
	paging_segment_t	vsa_ps;		/* the paging segment used */
	int			vsa_flags;	/* flags */
	int			vsa_error;	/* error, if there is one */
	MACH_PORT_FACE		reply_port;	/* associated reply port */
};

/*
 * flags values.
 */
#define VSA_READ	0x0001
#define VSA_WRITE	0x0002
#define VSA_TRANSFER	0x0004

/*
 * List of all vstructs.  A specific vstruct is
 * found directly via its port, this list is
 * only used for monitoring purposes by the
 * default_pager_object* calls
 */
struct vstruct_list_head {
	queue_head_t	vsl_queue;
	lck_mtx_t	vsl_lock;
	int		vsl_count;	/* saves code */
};

__private_extern__ struct vstruct_list_head	vstruct_list;

__private_extern__ void vstruct_list_insert(vstruct_t vs);
__private_extern__ void vstruct_list_delete(vstruct_t vs);


extern lck_grp_t	default_pager_lck_grp;
extern lck_attr_t	default_pager_lck_attr;

#define VSL_LOCK_INIT()	lck_mtx_init(&vstruct_list.vsl_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define VSL_LOCK()	lck_mtx_lock(&vstruct_list.vsl_lock)
#define VSL_LOCK_TRY()	lck_mtx_try_lock(&vstruct_list.vsl_lock)
#define VSL_UNLOCK()	lck_mtx_unlock(&vstruct_list.vsl_lock)
#define VSL_SLEEP(e,i)	lck_mtx_sleep(&vstruct_list.vsl_lock, LCK_SLEEP_DEFAULT, (e), (i))

#ifdef MACH_KERNEL
__private_extern__ zone_t	vstruct_zone;
#endif

/*
 * Create port alias for vstruct address.
 *
 * We assume that the last two bits of a vstruct address will be zero due to
 * memory allocation restrictions, hence are available for use as a sanity
 * check.
 */
#ifdef MACH_KERNEL

extern const struct memory_object_pager_ops default_pager_ops;

#define mem_obj_is_vs(_mem_obj_)					\
	(((_mem_obj_) != NULL) &&					\
	 ((_mem_obj_)->mo_pager_ops == &default_pager_ops))
#define mem_obj_to_vs(_mem_obj_)					\
	((vstruct_t)(_mem_obj_))
#define vs_to_mem_obj(_vs_)	((memory_object_t)(_vs_))
#define vs_lookup(_mem_obj_, _vs_)					\
	do {								\
		if (!mem_obj_is_vs(_mem_obj_))				\
			panic("bad dp memory object");			\
		_vs_ = mem_obj_to_vs(_mem_obj_);			\
	} while (0)
#define vs_lookup_safe(_mem_obj_, _vs_)					\
	do {								\
		if (!mem_obj_is_vs(_mem_obj_))				\
			_vs_ = VSTRUCT_NULL;				\
		else							\
			_vs_ = mem_obj_to_vs(_mem_obj_);		\
	} while (0)
#else

#define vs_to_port(_vs_)	(((vm_offset_t)(_vs_))+1)
#define port_to_vs(_port_)	((vstruct_t)(((vm_offset_t)(_port_))&~3))
#define port_is_vs(_port_)	((((vm_offset_t)(_port_))&3) == 1)

#define vs_lookup(_port_, _vs_)						\
	do {								\
		if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_)	\
		    || port_to_vs(_port_)->vs_mem_obj != (_port_))	\
			Panic("bad pager port");			\
		_vs_ = port_to_vs(_port_);				\
	} while (0)
#endif

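/*
 * Illustrative sketch only (not compiled here): in-kernel pager entry
 * points convert the memory_object_t they are handed back into a vstruct
 * before doing any work, e.g.
 *
 *	vstruct_t vs;
 *
 *	vs_lookup(mem_obj, vs);		-- panics if the object is not ours
 *
 * or, where a foreign object must be tolerated,
 *
 *	vs_lookup_safe(mem_obj, vs);
 *	if (vs == VSTRUCT_NULL)
 *		return KERN_FAILURE;
 *
 * Both forms rely on the start of struct vstruct matching
 * "struct memory_object", as noted above vs_pager_ops.
 */
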
/*
 * Cross-module routines declaration.
 */
#ifndef MACH_KERNEL
extern int dp_thread_id(void);
#endif
extern boolean_t device_reply_server(mach_msg_header_t *,
				     mach_msg_header_t *);
#ifdef MACH_KERNEL
extern boolean_t default_pager_no_senders(memory_object_t,
					  mach_port_mscount_t);
#else
extern void default_pager_no_senders(memory_object_t,
				     mach_port_seqno_t,
				     mach_port_mscount_t);
#endif

extern int local_log2(unsigned int);
extern void bs_initialize(void);
extern void bs_global_info(uint64_t *,
			   uint64_t *);
extern boolean_t bs_add_device(char *,
			       MACH_PORT_FACE);
extern vstruct_t ps_vstruct_create(dp_size_t);
extern void ps_vstruct_dealloc(vstruct_t);
extern kern_return_t pvs_cluster_read(vstruct_t,
				      dp_offset_t,
				      dp_size_t,
				      void *);
extern kern_return_t vs_cluster_write(vstruct_t,
				      upl_t,
				      upl_offset_t,
				      upl_size_t,
				      boolean_t,
				      int);
extern dp_offset_t ps_clmap(vstruct_t,
			    dp_offset_t,
			    struct clmap *,
			    int,
			    dp_size_t,
			    int);
extern vm_size_t ps_vstruct_allocated_size(vstruct_t);
extern unsigned int ps_vstruct_allocated_pages(vstruct_t,
					       default_pager_page_t *,
					       unsigned int);
extern boolean_t bs_set_default_clsize(unsigned int);

extern boolean_t verbose;

extern thread_call_t default_pager_backing_store_monitor_callout;
extern void default_pager_backing_store_monitor(thread_call_param_t, thread_call_param_t);

extern ipc_port_t max_pages_trigger_port;
extern unsigned int dp_pages_free;
extern unsigned int maximum_pages_free;

/* Do we know yet if swap files need to be encrypted ? */
extern boolean_t dp_encryption_inited;
/* Should we encrypt data before writing to swap ? */
extern boolean_t dp_encryption;

#endif	/* _DEFAULT_PAGER_INTERNAL_H_ */