]> git.saurik.com Git - apple/xnu.git/blob - osfmk/default_pager/default_pager_internal.h
738d74887646e943ce22bcba1fbb03b255c0493f
[apple/xnu.git] / osfmk / default_pager / default_pager_internal.h
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58
59 /*
60 * Default pager.
61 * General definitions.
62 */
63
64 #ifndef _DEFAULT_PAGER_INTERNAL_H_
65 #define _DEFAULT_PAGER_INTERNAL_H_
66
67 #include <default_pager/diag.h>
68 #include <default_pager/default_pager_types.h>
69 #include <mach/mach_types.h>
70 #include <ipc/ipc_port.h>
71 #include <ipc/ipc_types.h>
72 #include <ipc/ipc_space.h>
73 #include <kern/lock.h>
74 #include <kern/kalloc.h>
75 #include <kern/thread.h>
76 #include <vm/vm_kern.h>
77 #include <device/device_types.h>
78
79 /*
80 * Default option settings.
81 */
82 #ifndef PARALLEL
83 #define PARALLEL 1
84 #endif
85
86 #ifndef CHECKSUM
87 #define CHECKSUM 0
88 #endif
89
90 #define MACH_PORT_FACE mach_port_t
91
92 #if 0
93 #ifndef USE_PRECIOUS
94 #define USE_PRECIOUS TRUE
95 #endif
96 #endif
97
98 #ifdef USER_PAGER
99 #define UP(stuff) stuff
100 #else /* USER_PAGER */
101 #define UP(stuff)
102 #endif /* USER_PAGER */
103
/*
 * Serialized debug printf.
 * In user space, output from multiple server threads is serialized
 * with dprintf_lock and each line is prefixed with the pager name and
 * thread id; in the kernel build the console printf is used directly
 * with a "[KERNEL]" prefix.  "args" must be a parenthesized printf
 * argument list, e.g. dprintf(("x=%d\n", x)).
 */
#ifndef MACH_KERNEL
extern struct mutex dprintf_lock;
#define PRINTF_LOCK_INIT()	mutex_init(&dprintf_lock)
#define PRINTF_LOCK()		mutex_lock(&dprintf_lock)
#define PRINTF_UNLOCK()		mutex_unlock(&dprintf_lock)
#endif

#ifndef MACH_KERNEL
#define dprintf(args)							\
	do {								\
		PRINTF_LOCK();						\
		printf("%s[%d]: ", my_name, dp_thread_id());		\
		printf args;						\
		PRINTF_UNLOCK();					\
	} while (0)
#else
#define dprintf(args)							\
	do {								\
		printf("%s[KERNEL]: ", my_name);			\
		printf args;						\
	} while (0)
#endif
126
127 /*
128 * Debug.
129 */
130 __private_extern__ char my_name[];
131
#define DEFAULT_PAGER_DEBUG	0

#if DEFAULT_PAGER_DEBUG

/* Bit mask selecting which DP_DEBUG categories are emitted. */
extern int debug_mask;
#define DEBUG_MSG_EXTERNAL	0x00000001
#define DEBUG_MSG_INTERNAL	0x00000002
#define DEBUG_MO_EXTERNAL	0x00000100
#define DEBUG_MO_INTERNAL	0x00000200
#define DEBUG_VS_EXTERNAL	0x00010000
#define DEBUG_VS_INTERNAL	0x00020000
#define DEBUG_BS_EXTERNAL	0x01000000
#define DEBUG_BS_INTERNAL	0x02000000

/* Emit "args" via dprintf() when any bit of "level" is enabled. */
#define DP_DEBUG(level, args)						\
	do {								\
		if (debug_mask & (level)) 				\
			dprintf(args);					\
	} while (0)

/*
 * Panic if "expr" is false.
 *
 * BUG FIX: preprocessor directives may not appear inside a macro
 * replacement list (C11 6.10.3), so the original form -- which
 * embedded #ifndef MACH_KERNEL inside the ASSERT definition -- did
 * not compile when DEFAULT_PAGER_DEBUG was enabled.  The conditional
 * now selects between two complete definitions instead.
 */
#ifndef MACH_KERNEL
#define ASSERT(expr)							\
	do {								\
		if (!(expr))						\
			panic("%s[%d]%s: assertion failed in %s line %d: %s",\
				my_name, dp_thread_id(), here,		\
				__FILE__, __LINE__, # expr);		\
	} while (0)
#else /* MACH_KERNEL */
#define ASSERT(expr)							\
	do {								\
		if (!(expr))						\
			panic("%s[KERNEL]: assertion failed in %s line %d: %s",\
				my_name, __FILE__, __LINE__, # expr);	\
	} while (0)
#endif /* MACH_KERNEL */

#else /* DEFAULT_PAGER_DEBUG */

/* Debugging disabled: both macros compile away to nothing. */
#define DP_DEBUG(level, args)
#define ASSERT(clause)

#endif /* DEFAULT_PAGER_DEBUG */
171
172 #ifndef MACH_KERNEL
173 extern char *mach_error_string(kern_return_t);
174 #endif
175
/*
 * Minimum of two values.
 * Guarded so that a prior definition (e.g. from a sys/param.h-style
 * header) does not trigger a redefinition conflict.
 * NOTE: evaluates both arguments more than once -- do not pass
 * expressions with side effects (e.g. MIN(i++, j)).
 */
#ifndef MIN
#define MIN(a,b)	(((a) < (b)) ? (a) : (b))
#endif
177
178 #define PAGER_SUCCESS 0
179 #define PAGER_FULL 1
180 #define PAGER_ERROR 2
181
182 /*
183 * VM and IPC globals.
184 */
185 #ifdef MACH_KERNEL
186 #define vm_page_size page_size
187 #else
188 extern vm_object_size_t vm_page_size;
189 #endif
190 extern unsigned long long vm_page_mask;
191 extern int vm_page_shift;
192
193 #ifndef MACH_KERNEL
194 #define ptoa(p) ((p)*vm_page_size)
195 #define atop(a) ((a)/vm_page_size)
196 #endif
197 #define howmany(a,b) (((a) + (b) - 1)/(b))
198
199 extern memory_object_default_t default_pager_object;
200
201 #ifdef MACH_KERNEL
202 extern mutex_t dpt_lock; /* Lock for the dpt array */
203 extern int default_pager_internal_count;
204 extern MACH_PORT_FACE default_pager_host_port;
205 /* extern task_t default_pager_self; */ /* dont need or want */
206 extern MACH_PORT_FACE default_pager_internal_set;
207 extern MACH_PORT_FACE default_pager_external_set;
208 extern MACH_PORT_FACE default_pager_default_set;
209 #else
210 extern mach_port_t default_pager_host_port;
211 extern task_port_t default_pager_self;
212 extern mach_port_t default_pager_internal_set;
213 extern mach_port_t default_pager_external_set;
214 extern mach_port_t default_pager_default_set;
215 #endif
216
/*
 * Per-thread state for the default pager's service threads.
 * The kernel build keeps these in dpt_array (declared below) guarded
 * by dpt_lock; the user-space build records the cthread and a small
 * id used in dprintf output.
 */
typedef struct default_pager_thread {
#ifndef MACH_KERNEL
	cthread_t	dpt_thread;	/* Server thread. */
#endif
	vm_offset_t	dpt_buffer;	/* Read buffer. */
	boolean_t	dpt_internal;	/* Do we handle internal objects? */
#ifndef MACH_KERNEL
	int		dpt_id;		/* thread id for printf */
#else
	int		checked_out;	/* presumably nonzero while claimed by a request -- TODO confirm against dpt_array users */
#endif
	boolean_t	dpt_initialized_p;	/* Thread is ready for requests. */
} default_pager_thread_t;
230
231 #ifdef MACH_KERNEL
232 extern default_pager_thread_t **dpt_array;
233 #endif
234
235 /*
236 * Global statistics.
237 */
/*
 * Global statistics.
 *
 * NOTE(review): this is a tentative *definition* (not an extern
 * declaration) in a header; it relies on common-symbol linkage when
 * the header is included by more than one translation unit -- confirm
 * before converting to "extern".  Updates go through GSTAT(), which
 * runs the clause under gs_lock.
 */
struct {
	unsigned int	gs_pageout_calls;	/* # pageout calls */
	unsigned int	gs_pagein_calls;	/* # pagein calls */
	unsigned int	gs_pages_in;		/* # pages paged in (total) */
	unsigned int	gs_pages_out;		/* # pages paged out (total) */
	unsigned int	gs_pages_unavail;	/* # zero-fill pages */
	unsigned int	gs_pages_init;		/* # page init requests */
	unsigned int	gs_pages_init_writes;	/* # page init writes */
	VSTATS_LOCK_DECL(gs_lock)
} global_stats;
/* Execute "clause" under the global statistics lock. */
#define GSTAT(clause)	VSTATS_ACTION(&global_stats.gs_lock, (clause))
249
250 /*
251 * Cluster related definitions.
252 * Clusters are sized in number of pages per cluster.
253 * Cluster sizes must be powers of two.
254 *
255 * These numbers are related to the struct vs_map,
256 * defined below.
257 */
258 #define MAX_CLUSTER_SIZE 8
259 #define MAX_CLUSTER_SHIFT 3
260 #define NO_CLSIZE 0
261
/*
 * bit map related macros
 *
 * The bitmap is addressed as an array of chars, NBBY bits per char:
 * bit (i) lives in byte (i)/NBBY at bit position (i)%NBBY.
 * "a" is any pointer to the bitmap storage; "i" is the bit index.
 */
#define NBBY	8	/* bits per byte XXX */
#define BYTEMASK	0xff
#define setbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))	/* set bit i */
#define clrbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))	/* clear bit i */
#define isset(a,i)	(*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))	/* nonzero iff bit i set */
#define isclr(a,i)	((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)	/* nonzero iff bit i clear */
271
272 /*
273 * Default Pager.
274 * Backing Store Management.
275 */
276
277 #define BS_MAXPRI 4
278 #define BS_MINPRI 0
279 #define BS_NOPRI -1
280 #define BS_FULLPRI -2
281
282 /*
283 * Mapping between backing store port and backing store object.
284 */
/*
 * One backing store, keyed by its port (bs_port) and linked on
 * backing_store_list.  Paging segments point back at their owning
 * backing store via ps_bs (see struct paging_segment below).
 */
struct backing_store {
	queue_chain_t	bs_links;	/* link in backing_store_list */
#ifdef MACH_KERNEL
	mutex_t		bs_lock;	/* lock for the structure */
#else
	struct mutex	bs_lock;	/* lock for the structure */
#endif
	MACH_PORT_FACE	bs_port;	/* backing store port */
	int		bs_priority;	/* allocation priority: BS_MINPRI..BS_MAXPRI, or BS_NOPRI / BS_FULLPRI */
	int		bs_clsize;	/* cluster size in pages */

	/* statistics */
	unsigned int	bs_pages_free;	/* # unallocated pages */
	unsigned int	bs_pages_total;	/* # pages (total) */
	unsigned int	bs_pages_in;	/* # page read requests */
	unsigned int	bs_pages_in_fail;	/* # page read errors */
	unsigned int	bs_pages_out;	/* # page write requests */
	unsigned int	bs_pages_out_fail;	/* # page write errors */
};
typedef struct backing_store *backing_store_t;
#define BACKING_STORE_NULL	((backing_store_t) 0)
/* Execute "clause" under this backing store's lock (stats updates). */
#define BS_STAT(bs, clause)	VSTATS_ACTION(&(bs)->bs_lock, (clause))
307
308 #ifdef MACH_KERNEL
309 #define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock, 0)
310 #else
311 #define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock)
312 #endif
313 #define BS_LOCK(bs) mutex_lock(&(bs)->bs_lock)
314 #define BS_UNLOCK(bs) mutex_unlock(&(bs)->bs_lock)
315
316 struct backing_store_list_head {
317 queue_head_t bsl_queue;
318 #ifdef MACH_KERNEL
319 mutex_t bsl_lock;
320 #else
321 struct mutex bsl_lock;
322 #endif
323 };
324 extern struct backing_store_list_head backing_store_list;
325 extern int backing_store_release_trigger_disable;
326
327 #ifdef MACH_KERNEL
328 #define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock, 0)
329 #else
330 #define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock)
331 #endif
332 #define BSL_LOCK() mutex_lock(&backing_store_list.bsl_lock)
333 #define BSL_UNLOCK() mutex_unlock(&backing_store_list.bsl_lock)
334
335 /*
336 * Paging segment management.
337 * Controls allocation of blocks within paging area.
338 */
/*
 * Paging segment management.
 * Controls allocation of blocks within paging area.
 * A segment is either a raw device partition (PS_PARTITION) or a
 * backing-store file (PS_FILE), as recorded in ps_segtype; space is
 * handed out in clusters of (1 << ps_clshift) pages, tracked by the
 * ps_bmap bitmap.
 */
struct paging_segment {
	/* device management */
	union {
		MACH_PORT_FACE	dev;	/* Port to device */
		struct vnode	*vnode;	/* vnode for bs file */
	} storage_type;
	unsigned int	ps_segtype;	/* file type or partition (PS_FILE / PS_PARTITION) */
	MACH_PORT_FACE	ps_device;	/* Port to device */
	vm_offset_t	ps_offset;	/* Offset of segment within device */
	vm_offset_t	ps_recnum;	/* Number of device records in segment*/
	unsigned int	ps_pgnum;	/* Number of pages in segment */
	unsigned int	ps_record_shift;/* Bit shift: pages to device records */

	/* clusters and pages */
	unsigned int	ps_clshift;	/* Bit shift: clusters to pages */
	unsigned int	ps_ncls;	/* Number of clusters in segment */
	unsigned int	ps_clcount;	/* Number of free clusters */
	unsigned int	ps_pgcount;	/* Number of free pages */
	unsigned long	ps_hint;	/* Hint of where to look next. */

	/* bitmap */
#ifdef MACH_KERNEL
	mutex_t		ps_lock;	/* Lock for contents of struct */
#else
	struct mutex	ps_lock;	/* Lock for contents of struct */
#endif
	unsigned char	*ps_bmap;	/* Map of used clusters */

	/* backing store */
	backing_store_t	ps_bs;		/* Backing store segment belongs to */

	boolean_t	ps_going_away;	/* Destroy attempt in progress */
};

#define ps_vnode	storage_type.vnode
/*
 * NOTE(review): this macro textually shadows the ps_device *field*
 * declared above for all code after this point; the standalone field
 * appears to be unused/legacy -- confirm before removing it.
 */
#define ps_device	storage_type.dev
#define PS_PARTITION	1
#define PS_FILE		2
377
378 typedef struct paging_segment *paging_segment_t;
379
380 #define PAGING_SEGMENT_NULL ((paging_segment_t) 0)
381
382 #ifdef MACH_KERNEL
383 #define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock, 0)
384 #else
385 #define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock)
386 #endif
387 #define PS_LOCK(ps) mutex_lock(&(ps)->ps_lock)
388 #define PS_UNLOCK(ps) mutex_unlock(&(ps)->ps_lock)
389
390 typedef unsigned int pseg_index_t;
391
392 #define INVALID_PSEG_INDEX ((pseg_index_t)-1)
393 #define NULL_PSEG_INDEX ((pseg_index_t) 0)
394 /*
395 * MAX_PSEG_INDEX value is related to struct vs_map below.
396 * "0" is reserved for empty map entries (no segment).
397 */
398 #define MAX_PSEG_INDEX 63 /* 0 is reserved for empty map */
399 #define MAX_NUM_PAGING_SEGMENTS MAX_PSEG_INDEX
400
401 /* paging segments array */
402 extern paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS];
403 #ifdef MACH_KERNEL
404 extern mutex_t paging_segments_lock;
405 #else
406 extern struct mutex paging_segments_lock;
407 #endif
408 extern int paging_segment_count; /* number of active paging segments */
409 extern int paging_segment_max; /* highest used paging segment index */
410 extern int ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1];
411
412 #ifdef MACH_KERNEL
413 #define PSL_LOCK_INIT() mutex_init(&paging_segments_lock, 0)
414 #else
415 #define PSL_LOCK_INIT() mutex_init(&paging_segments_lock)
416 #endif
417 #define PSL_LOCK() mutex_lock(&paging_segments_lock)
418 #define PSL_UNLOCK() mutex_unlock(&paging_segments_lock)
419
420 /*
421 * Vstruct manipulation. The vstruct is the pager's internal
422 * representation of vm objects it manages. There is one vstruct allocated
423 * per vm object.
424 *
425 * The following data structures are defined for vstruct and vm object
426 * management.
427 */
428
429 /*
430 * vs_map
431 * A structure used only for temporary objects. It is the element
432 * contained in the vs_clmap structure, which contains information
433 * about which clusters and pages in an object are present on backing
434 * store (a paging file).
435 * Note that this structure and its associated constants may change
436 * with minimal impact on code. The only function which knows the
437 * internals of this structure is ps_clmap().
438 *
439 * If it is necessary to change the maximum number of paging segments
440 * or pages in a cluster, then this structure is the one most
441 * affected. The constants and structures which *may* change are:
442 * MAX_CLUSTER_SIZE
443 * MAX_CLUSTER_SHIFT
444 * MAX_NUM_PAGING_SEGMENTS
445 * VSTRUCT_DEF_CLSHIFT
446 * struct vs_map and associated macros and constants (VSM_*)
447 * (only the macro definitions need change, the exported (inside the
448 * pager only) interfaces remain the same; the constants are for
449 * internal vs_map manipulation only).
450 * struct clbmap (below).
451 */
/*
 * One entry in a vstruct's cluster map: records where one cluster of
 * the object lives on backing store.
 *
 * Field notes (see the VSM_* macros below):
 *  - vsmap_entry (23 bits): cluster offset within the paging segment.
 *    All ones (VSM_ENTRY_NULL == 0x7fffff) means "no cluster"; when
 *    vsmap_error is set the field instead holds the error code.
 *  - vsmap_psindex (8 bits): index into paging_segments[]; 0 is
 *    reserved for empty entries (NULL_PSEG_INDEX).
 *  - vsmap_bmap / vsmap_alloc (16 bits each): per-page "valid" and
 *    "allocated" bitmaps for the cluster.
 * The five bit-fields total 64 bits, so the struct typically occupies
 * two 32-bit words despite the single "unsigned int" declarator.
 */
struct vs_map {
	unsigned int	vsmap_entry:23,	/* offset in paging segment */
			vsmap_psindex:8,	/* paging segment */
			vsmap_error:1,
			vsmap_bmap:16,
			vsmap_alloc:16;
};

typedef struct vs_map *vs_map_t;
461
462
#define VSM_ENTRY_NULL	0x7fffff	/* all-ones 23-bit entry == "no cluster" */

/*
 * Exported macros for manipulating the vs_map structure --
 * checking status, getting and setting bits.
 * All take the vs_map by value ("vsm"), not by pointer.
 */
/* Pages per cluster for a vstruct. */
#define VSCLSIZE(vs)	(1UL << (vs)->vs_clshift)
/* Entry is empty: no cluster assigned and no error recorded. */
#define VSM_ISCLR(vsm)	(((vsm).vsmap_entry == VSM_ENTRY_NULL) && \
			 ((vsm).vsmap_error == 0))
#define VSM_ISERR(vsm)	((vsm).vsmap_error)
#define VSM_SETCLOFF(vsm, val)	((vsm).vsmap_entry = (val))
/* On error, the 23-bit entry field is reused to hold the error code. */
#define VSM_SETERR(vsm, err)	((vsm).vsmap_error = 1, \
				 (vsm).vsmap_entry = (err))
#define VSM_GETERR(vsm)	((vsm).vsmap_entry)
/* Mark / clear page "page" (0-based within the cluster) as valid. */
#define VSM_SETPG(vsm, page)	((vsm).vsmap_bmap |= (1 << (page)))
#define VSM_CLRPG(vsm, page)	((vsm).vsmap_bmap &= ~(1 << (page)))
#define VSM_SETPS(vsm, psindx)	((vsm).vsmap_psindex = (psindx))
#define VSM_PSINDEX(vsm)	((vsm).vsmap_psindex)
#define VSM_PS(vsm)	paging_segments[(vsm).vsmap_psindex]
#define VSM_BMAP(vsm)	((vsm).vsmap_bmap)
#define VSM_CLOFF(vsm)	((vsm).vsmap_entry)
/* Reset the entry to the empty state (see VSM_ISCLR). */
#define VSM_CLR(vsm)	((vsm).vsmap_entry = VSM_ENTRY_NULL, \
			 (vsm).vsmap_psindex = 0, \
			 (vsm).vsmap_error = 0, \
			 (vsm).vsmap_bmap = 0, \
			 (vsm).vsmap_alloc = 0)
#define VSM_ALLOC(vsm)	((vsm).vsmap_alloc)
/* Mark / clear page "page" as allocated on backing store. */
#define VSM_SETALLOC(vsm, page)	((vsm).vsmap_alloc |= (1 << (page)))
#define VSM_CLRALLOC(vsm, page)	((vsm).vsmap_alloc &= ~(1 << (page)))
492
493 /*
494 * Constants and macros for dealing with vstruct maps,
495 * which comprise vs_map structures, which
496 * map vm objects to backing storage (paging files and clusters).
497 */
498 #define CLMAP_THRESHOLD 512 /* bytes */
499 #define CLMAP_ENTRIES (CLMAP_THRESHOLD/sizeof(struct vs_map))
500 #define CLMAP_SIZE(ncls) (ncls*sizeof(struct vs_map))
501
502 #define INDIRECT_CLMAP_ENTRIES(ncls) (((ncls-1)/CLMAP_ENTRIES) + 1)
503 #define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * sizeof(struct vs_map *))
504 #define INDIRECT_CLMAP(size) (CLMAP_SIZE(size) > CLMAP_THRESHOLD)
505
506 #define RMAPSIZE(blocks) (howmany(blocks,NBBY))
507
508 #define CL_FIND 1
509 #define CL_ALLOC 2
510
511 /*
512 * clmap
513 *
514 * A cluster map returned by ps_clmap. It is an abstracted cluster of
515 * pages. It gives the caller information about the cluster
516 * desired. On read it tells the caller if a cluster is mapped, and if so,
517 * which of its pages are valid. It should not be referenced directly,
518 * except by ps_clmap; macros should be used. If the number of pages
519 * in a cluster needs to be more than 32, then the struct clbmap must
520 * become larger.
521 */
/* One bit per page in a cluster (limits clusters to 32 pages). */
struct clbmap {
	unsigned int	clb_map;
};

/*
 * Cluster map filled in by ps_clmap().  Callers use the CLMAP_*
 * accessors below rather than touching the fields directly.
 */
struct clmap {
	paging_segment_t	cl_ps;	/* paging segment backing cluster */
	int		cl_numpages;	/* number of valid pages */
	struct clbmap	cl_bmap;	/* map of pages in cluster */
	int		cl_error;	/* cluster error value */
	struct clbmap	cl_alloc;	/* map of allocated pages in cluster */
};

#define CLMAP_ERROR(clm)	(clm).cl_error
#define CLMAP_PS(clm)		(clm).cl_ps
#define CLMAP_NPGS(clm)		(clm).cl_numpages
#define CLMAP_ISSET(clm,i)	((1<<(i))&((clm).cl_bmap.clb_map))	/* page i valid? */
#define CLMAP_ALLOC(clm)	(clm).cl_alloc.clb_map
/*
 * Shift off unused bits in a partial cluster.
 * Note these two take a *pointer* to the clmap, unlike the accessors
 * above which take it by value.
 */
#define CLMAP_SHIFT(clm,vs)	\
	(clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
#define CLMAP_SHIFTALLOC(clm,vs)	\
	(clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
546
/* Associates an external name pointer with its vstruct. */
typedef struct vstruct_alias {
	int		*name;
	struct vstruct	*vs;
} vstruct_alias_t;
551
552 #ifdef MACH_KERNEL
553 #define DPT_LOCK_INIT(lock) mutex_init(&(lock), 0)
554 #define DPT_LOCK(lock) mutex_lock(&(lock))
555 #define DPT_UNLOCK(lock) mutex_unlock(&(lock))
556 #define DPT_SLEEP(lock, e, i) thread_sleep_mutex(&(lock), (event_t)(e), i)
557 #define VS_LOCK_TYPE hw_lock_data_t
558 #define VS_LOCK_INIT(vs) hw_lock_init(&(vs)->vs_lock)
559 #define VS_TRY_LOCK(vs) (VS_LOCK(vs),TRUE)
560 #define VS_LOCK(vs) hw_lock_lock(&(vs)->vs_lock)
561 #define VS_UNLOCK(vs) hw_lock_unlock(&(vs)->vs_lock)
562 #define VS_MAP_LOCK_TYPE mutex_t
563 #define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock, 0)
564 #define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
565 #define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
566 #define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
567 #else
568 #define VS_LOCK_TYPE struct mutex
569 #define VS_LOCK_INIT(vs) mutex_init(&(vs)->vs_lock, 0)
570 #define VS_TRY_LOCK(vs) mutex_try(&(vs)->vs_lock)
571 #define VS_LOCK(vs) mutex_lock(&(vs)->vs_lock)
572 #define VS_UNLOCK(vs) mutex_unlock(&(vs)->vs_lock)
573 #define VS_MAP_LOCK_TYPE struct mutex
574 #define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock)
575 #define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
576 #define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
577 #define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
578 #endif
579
580
581 /*
582 * VM Object Structure: This is the structure used to manage
583 * default pager object associations with their control counter-
584 * parts (VM objects).
585 */
/*
 * The pager's per-VM-object state (one vstruct per managed object).
 * See the comment block above for the overall role.  The first two
 * fields double as a fake memory-object header in the kernel build
 * (see ISVS / mem_obj_is_vs below).
 */
typedef struct vstruct {
	int		*vs_mem_obj;	/* our memory obj - temp */
	int		vs_mem_obj_ikot;/* JMM:fake ip_kotype() */
	memory_object_control_t	vs_control;	/* our mem obj control ref */
	VS_LOCK_TYPE	vs_lock;	/* data for the lock */

	/* JMM - Could combine these first two in a single pending count now */
	unsigned int	vs_next_seqno;	/* next sequence num to issue */
	unsigned int	vs_seqno;	/* Pager port sequence number */
	unsigned int	vs_readers;	/* Reads in progress */
	unsigned int	vs_writers;	/* Writes in progress */

#ifdef MACH_KERNEL
	/* In-kernel: waiters are recorded as single bits ... */
	int
	/* boolean_t */	vs_waiting_seqno:1,	/* to wait on seqno */
	/* boolean_t */	vs_waiting_read:1,	/* waiting on reader? */
	/* boolean_t */	vs_waiting_write:1,	/* waiting on writer? */
	/* boolean_t */	vs_waiting_async:1,	/* waiting on async? */
	/* boolean_t */	vs_indirect:1,		/* map indirect? */
	/* boolean_t */	vs_xfer_pending:1;	/* xfer out of seg? */
#else
	/* ... user space: waiters are recorded as event handles. */
	event_t		vs_waiting_seqno;/* to wait on seqno */
	event_t		vs_waiting_read; /* to wait on readers */
	event_t		vs_waiting_write;/* to wait on writers */
	event_t		vs_waiting_async;/* to wait on async_pending */
	int		vs_indirect:1,	/* Is the map indirect ? */
			vs_xfer_pending:1;	/* xfering out of a seg ? */
#endif

	unsigned int	vs_async_pending;/* pending async write count */
	unsigned int	vs_errors;	/* Pageout error count */
	unsigned int	vs_references;	/* references */

	queue_chain_t	vs_links;	/* Link in pager-wide list */

	unsigned int	vs_clshift;	/* Bit shift: clusters->pages */
	unsigned int	vs_size;	/* Object size in clusters */
#ifdef MACH_KERNEL
	mutex_t		vs_map_lock;	/* to protect map below */
#else
	struct mutex	vs_map_lock;	/* to protect map below */
#endif
	/*
	 * Cluster map: direct (one level) while it fits under
	 * CLMAP_THRESHOLD bytes, indirect (two levels) otherwise;
	 * vs_indirect above says which arm is live.
	 */
	union {
		struct vs_map	*vsu_dmap;	/* Direct map of clusters */
		struct vs_map	**vsu_imap;	/* Indirect map of clusters */
	} vs_un;
} *vstruct_t;

/* Convenience names for the live arm of vs_un. */
#define vs_dmap		vs_un.vsu_dmap
#define vs_imap		vs_un.vsu_imap

#define VSTRUCT_NULL	((vstruct_t) 0)
638
639 __private_extern__ void vs_async_wait(vstruct_t);
640
641 #if PARALLEL
642 __private_extern__ void vs_lock(vstruct_t);
643 __private_extern__ void vs_unlock(vstruct_t);
644 __private_extern__ void vs_start_read(vstruct_t);
645 __private_extern__ void vs_finish_read(vstruct_t);
646 __private_extern__ void vs_wait_for_readers(vstruct_t);
647 __private_extern__ void vs_start_write(vstruct_t);
648 __private_extern__ void vs_finish_write(vstruct_t);
649 __private_extern__ void vs_wait_for_writers(vstruct_t);
650 __private_extern__ void vs_wait_for_sync_writers(vstruct_t);
651 #else /* PARALLEL */
652 #define vs_lock(vs)
653 #define vs_unlock(vs)
654 #define vs_start_read(vs)
655 #define vs_wait_for_readers(vs)
656 #define vs_finish_read(vs)
657 #define vs_start_write(vs)
658 #define vs_wait_for_writers(vs)
659 #define vs_wait_for_sync_writers(vs)
660 #define vs_finish_write(vs)
661 #endif /* PARALLEL */
662
663 /*
664 * Data structures and variables dealing with asynchronous
665 * completion of paging operations.
666 */
667 /*
668 * vs_async
669 * A structure passed to ps_write_device for asynchronous completions.
670 * It contains enough information to complete the write and
671 * inform the VM of its completion.
672 */
/*
 * vs_async
 * A structure passed to ps_write_device for asynchronous completions.
 * It contains enough information to complete the write and
 * inform the VM of its completion.
 */
struct vs_async {
	struct vs_async	*vsa_next;	/* pointer to next structure */
	vstruct_t	vsa_vs;		/* the vstruct for the object */
	vm_offset_t	vsa_addr;	/* the vaddr of the data moved */
	vm_offset_t	vsa_offset;	/* the object offset of the data */
	vm_size_t	vsa_size;	/* the number of bytes moved */
	paging_segment_t	vsa_ps;	/* the paging segment used */
	int		vsa_flags;	/* flags (VSA_* below) */
	int		vsa_error;	/* error, if there is one */
	mutex_t		vsa_lock;	/* protects this structure */
	MACH_PORT_FACE	reply_port;	/* associated reply port */
};

/*
 * flags values for vsa_flags.
 */
#define VSA_READ	0x0001
#define VSA_WRITE	0x0002
#define VSA_TRANSFER	0x0004
692
693 /*
694 * List of all vstructs. A specific vstruct is
695 * found directly via its port, this list is
696 * only used for monitoring purposes by the
697 * default_pager_object* calls
698 */
699 struct vstruct_list_head {
700 queue_head_t vsl_queue;
701 #ifdef MACH_KERNEL
702 mutex_t vsl_lock;
703 #else
704 struct mutex vsl_lock;
705 #endif
706 int vsl_count; /* saves code */
707 };
708
709 __private_extern__ struct vstruct_list_head vstruct_list;
710
711 __private_extern__ void vstruct_list_insert(vstruct_t vs);
712 __private_extern__ void vstruct_list_delete(vstruct_t vs);
713
714
715 #ifdef MACH_KERNEL
716 #define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock, 0)
717 #else
718 #define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock)
719 #endif
720 #define VSL_LOCK() mutex_lock(&vstruct_list.vsl_lock)
721 #define VSL_LOCK_TRY() mutex_try(&vstruct_list.vsl_lock)
722 #define VSL_UNLOCK() mutex_unlock(&vstruct_list.vsl_lock)
723 #define VSL_SLEEP(e,i) thread_sleep_mutex((e), &vstruct_list.vsl_lock, (i))
724
725 #ifdef MACH_KERNEL
726 __private_extern__ zone_t vstruct_zone;
727 #endif
728
729 /*
730 * Create port alias for vstruct address.
731 *
732 * We assume that the last two bits of a vstruct address will be zero due to
733 * memory allocation restrictions, hence are available for use as a sanity
734 * check.
735 */
736 #ifdef MACH_KERNEL
737
/*
 * In-kernel case: a memory object *is* the vstruct, cast directly.
 * ISVS is an arbitrary sentinel stored where the object's "pager"
 * field lands (this appears to alias the vstruct's leading
 * vs_mem_obj word -- confirm against struct vstruct) so that
 * mem_obj_is_vs() can sanity-check a pointer before treating it as
 * a vstruct.
 */
#define ISVS	((int *)123456)
#define mem_obj_is_vs(_mem_obj_) 					\
	(((_mem_obj_) != NULL) && ((_mem_obj_)->pager == ISVS))
#define mem_obj_to_vs(_mem_obj_) 					\
	((vstruct_t)(_mem_obj_))
#define vs_to_mem_obj(_vs_)	((memory_object_t)(_vs_))
/* Resolve a memory object to its vstruct; panics on a bad object. */
#define vs_lookup(_mem_obj_, _vs_) 					\
	do { 								\
		if (!mem_obj_is_vs(_mem_obj_)) 				\
			panic("bad dp memory object"); 			\
		_vs_ = mem_obj_to_vs(_mem_obj_); 			\
	} while (0)
/* As vs_lookup, but yields VSTRUCT_NULL instead of panicking. */
#define vs_lookup_safe(_mem_obj_, _vs_) 				\
	do { 								\
		if (!mem_obj_is_vs(_mem_obj_)) 				\
			_vs_ = VSTRUCT_NULL; 				\
		else 							\
			_vs_ = mem_obj_to_vs(_mem_obj_); 		\
	} while (0)
757 #else
758
/*
 * User-space case: the "port" handed out is the vstruct's address
 * tagged with 1 in the low bits (the address's low two bits are
 * assumed zero -- see the comment above).  port_is_vs() checks the
 * tag; port_to_vs() masks it back off.
 */
#define vs_to_port(_vs_)	(((vm_offset_t)(_vs_))+1)
#define port_to_vs(_port_)	((vstruct_t)(((vm_offset_t)(_port_))&~3))
#define port_is_vs(_port_)	((((vm_offset_t)(_port_))&3) == 1)

/*
 * Resolve a port to its vstruct; Panics unless the port is valid,
 * carries the vstruct tag, and round-trips through the vstruct's
 * own vs_mem_obj back-pointer.
 */
#define vs_lookup(_port_, _vs_)						\
	do {								\
		if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_)	\
		    || port_to_vs(_port_)->vs_mem_obj != (_port_))	\
			Panic("bad pager port");			\
		_vs_ = port_to_vs(_port_);				\
	} while (0)
770 #endif
771
772 /*
773 * Cross-module routines declaration.
774 */
775 #ifndef MACH_KERNEL
776 extern int dp_thread_id(void);
777 #endif
778 extern boolean_t device_reply_server(mach_msg_header_t *,
779 mach_msg_header_t *);
780 #ifdef MACH_KERNEL
781 extern boolean_t default_pager_no_senders(memory_object_t,
782 mach_port_mscount_t);
783 #else
784 extern void default_pager_no_senders(memory_object_t,
785 mach_port_seqno_t,
786 mach_port_mscount_t);
787 #endif
788
789 extern int local_log2(unsigned int);
790 extern void bs_initialize(void);
791 extern void bs_global_info(vm_size_t *,
792 vm_size_t *);
793 extern boolean_t bs_add_device(char *,
794 MACH_PORT_FACE);
795 extern vstruct_t ps_vstruct_create(vm_size_t);
796 extern void ps_vstruct_dealloc(vstruct_t);
797 extern kern_return_t pvs_cluster_read(vstruct_t,
798 vm_offset_t,
799 vm_size_t);
800 extern kern_return_t vs_cluster_write(vstruct_t,
801 upl_t,
802 upl_offset_t,
803 upl_size_t,
804 boolean_t,
805 int);
806 extern vm_offset_t ps_clmap(vstruct_t,
807 vm_offset_t,
808 struct clmap *,
809 int,
810 vm_size_t,
811 int);
812 extern vm_size_t ps_vstruct_allocated_size(vstruct_t);
813 extern size_t ps_vstruct_allocated_pages(vstruct_t,
814 default_pager_page_t *,
815 size_t);
816 extern boolean_t bs_set_default_clsize(unsigned int);
817
818 extern boolean_t verbose;
819
820 extern thread_call_t default_pager_backing_store_monitor_callout;
821 extern void default_pager_backing_store_monitor(thread_call_param_t, thread_call_param_t);
822
823 extern ipc_port_t max_pages_trigger_port;
824 extern unsigned int dp_pages_free;
825 extern unsigned int maximum_pages_free;
826
827 /* Do we know yet if swap files need to be encrypted ? */
828 extern boolean_t dp_encryption_inited;
829 /* Should we encrypt data before writing to swap ? */
830 extern boolean_t dp_encryption;
831
832 #endif /* _DEFAULT_PAGER_INTERNAL_H_ */