]> git.saurik.com Git - apple/xnu.git/blob - osfmk/default_pager/default_pager_internal.h
xnu-344.49.tar.gz
[apple/xnu.git] / osfmk / default_pager / default_pager_internal.h
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53
54 /*
55 * Default pager.
56 * General definitions.
57 */
58
59 #ifndef _DEFAULT_PAGER_INTERNAL_H_
60 #define _DEFAULT_PAGER_INTERNAL_H_
61
62 #include <default_pager/diag.h>
63 #include <default_pager/default_pager_types.h>
64 #include <mach/mach_types.h>
65 #include <ipc/ipc_port.h>
66 #include <ipc/ipc_types.h>
67 #include <ipc/ipc_space.h>
68 #include <kern/lock.h>
69 #include <kern/kalloc.h>
70 #include <kern/thread.h>
71 #include <vm/vm_kern.h>
72 #include <device/device_types.h>
73
74 /*
75 * Default option settings.
76 */
77 #ifndef PARALLEL
78 #define PARALLEL 1
79 #endif
80
81 #ifndef CHECKSUM
82 #define CHECKSUM 0
83 #endif
84
85 #define MACH_PORT_FACE mach_port_t
86
87 #if 0
88 #ifndef USE_PRECIOUS
89 #define USE_PRECIOUS TRUE
90 #endif
91 #endif
92
93 #ifdef USER_PAGER
94 #define UP(stuff) stuff
95 #else /* USER_PAGER */
96 #define UP(stuff)
97 #endif /* USER_PAGER */
98
99 #ifndef MACH_KERNEL
100 extern struct mutex dprintf_lock;
101 #define PRINTF_LOCK_INIT() mutex_init(&dprintf_lock)
102 #define PRINTF_LOCK() mutex_lock(&dprintf_lock)
103 #define PRINTF_UNLOCK() mutex_unlock(&dprintf_lock)
104 #endif
105
106 #ifndef MACH_KERNEL
107 #define dprintf(args) \
108 do { \
109 PRINTF_LOCK(); \
110 printf("%s[%d]: ", my_name, dp_thread_id()); \
111 printf args; \
112 PRINTF_UNLOCK(); \
113 } while (0)
114 #else
115 #define dprintf(args) \
116 do { \
117 printf("%s[KERNEL]: ", my_name); \
118 printf args; \
119 } while (0)
120 #endif
121
122 /*
123 * Debug.
124 */
125 __private_extern__ char my_name[];
126
127 #define DEFAULT_PAGER_DEBUG 0
128
129 #if DEFAULT_PAGER_DEBUG
130
131 extern int debug_mask;
132 #define DEBUG_MSG_EXTERNAL 0x00000001
133 #define DEBUG_MSG_INTERNAL 0x00000002
134 #define DEBUG_MO_EXTERNAL 0x00000100
135 #define DEBUG_MO_INTERNAL 0x00000200
136 #define DEBUG_VS_EXTERNAL 0x00010000
137 #define DEBUG_VS_INTERNAL 0x00020000
138 #define DEBUG_BS_EXTERNAL 0x01000000
139 #define DEBUG_BS_INTERNAL 0x02000000
140
141 #define DEBUG(level, args) \
142 do { \
143 if (debug_mask & (level)) \
144 dprintf(args); \
145 } while (0)
146
/*
 * ASSERT(expr): panic with identifying context when expr is false.
 *
 * Fixed: the previous version placed #ifndef MACH_KERNEL / #else / #endif
 * *inside* the macro replacement list (the lines were glued together by
 * the trailing backslashes).  Preprocessor directives are not allowed
 * within a macro definition, and the leading '#' is parsed as the
 * stringize operator, so the macro could never compile once
 * DEFAULT_PAGER_DEBUG was enabled.  Select between two complete
 * definitions instead.
 */
#ifndef MACH_KERNEL
#define ASSERT(expr) \
	do { \
		if (!(expr)) \
			panic("%s[%d]%s: assertion failed in %s line %d: %s",\
				my_name, dp_thread_id(), here, \
				__FILE__, __LINE__, # expr); \
	} while (0)
#else
#define ASSERT(expr) \
	do { \
		if (!(expr)) \
			panic("%s[KERNEL]: assertion failed in %s line %d: %s",\
				my_name, __FILE__, __LINE__, # expr); \
	} while (0)
#endif
159
160 #else /* DEFAULT_PAGER_DEBUG */
161
162 #define DEBUG(level, args)
163 #define ASSERT(clause)
164
165 #endif /* DEFAULT_PAGER_DEBUG */
166
167 #ifndef MACH_KERNEL
168 extern char *mach_error_string(kern_return_t);
169 #endif
170
171 #define MIN(a,b) (((a) < (b)) ? (a) : (b))	/* NOTE: evaluates each argument twice -- no side effects in args */
172
173 #define PAGER_SUCCESS 0
174 #define PAGER_FULL 1
175 #define PAGER_ERROR 2
176
177 /*
178 * VM and IPC globals.
179 */
180 #ifdef MACH_KERNEL
181 #define vm_page_size page_size
182 extern vm_size_t page_size;
183 #else
184 extern vm_object_size_t vm_page_size;
185 #endif
186 extern unsigned long long vm_page_mask;
187 extern int vm_page_shift;
188
189 #ifndef MACH_KERNEL
190 #define ptoa(p) ((p)*vm_page_size)
191 #define atop(a) ((a)/vm_page_size)
192 #endif
193 #define howmany(a,b) (((a) + (b) - 1)/(b))
194
195 extern memory_object_default_t default_pager_object;
196
197 #ifdef MACH_KERNEL
198 extern mutex_t dpt_lock; /* Lock for the dpt array */
199 extern unsigned int default_pager_internal_count;
200 extern MACH_PORT_FACE default_pager_host_port;
201 /* extern task_t default_pager_self; */ /* dont need or want */
202 extern MACH_PORT_FACE default_pager_internal_set;
203 extern MACH_PORT_FACE default_pager_external_set;
204 extern MACH_PORT_FACE default_pager_default_set;
205 #else
206 extern mach_port_t default_pager_host_port;
207 extern task_port_t default_pager_self;
208 extern mach_port_t default_pager_internal_set;
209 extern mach_port_t default_pager_external_set;
210 extern mach_port_t default_pager_default_set;
211 #endif
212
213 typedef struct default_pager_thread {
214 #ifndef MACH_KERNEL
215 cthread_t dpt_thread; /* Server thread. */
216 #endif
217 vm_offset_t dpt_buffer; /* Read buffer. */
218 boolean_t dpt_internal; /* Do we handle internal objects? */
219 #ifndef MACH_KERNEL
220 int dpt_id; /* thread id for printf */
221 #else
222 int checked_out;
223 #endif
224 boolean_t dpt_initialized_p; /* Thread is ready for requests. */
225 } default_pager_thread_t;
226
227 #ifdef MACH_KERNEL
228 extern default_pager_thread_t **dpt_array;
229 #endif
230
231 /*
232 * Global statistics.
233 */
/*
 * Pager-wide paging statistics, updated under gs_lock via GSTAT().
 *
 * NOTE(review): this is an unqualified definition in a header, so every
 * translation unit including this file gets its own tentative definition
 * of global_stats (merged as a common symbol by the linker) -- presumably
 * intentional, but confirm; the conventional form is "extern" here plus
 * one definition in a .c file.
 */
234 struct {
235 unsigned int gs_pageout_calls; /* # pageout calls */
236 unsigned int gs_pagein_calls; /* # pagein calls */
237 unsigned int gs_pages_in; /* # pages paged in (total) */
238 unsigned int gs_pages_out; /* # pages paged out (total) */
239 unsigned int gs_pages_unavail; /* # zero-fill pages */
240 unsigned int gs_pages_init; /* # page init requests */
241 unsigned int gs_pages_init_writes; /* # page init writes */
242 VSTATS_LOCK_DECL(gs_lock)
243 } global_stats;
244 #define GSTAT(clause) VSTATS_ACTION(&global_stats.gs_lock, (clause))
245
246 /*
247 * Cluster related definitions.
248 * Clusters are sized in number of pages per cluster.
249 * Cluster sizes must be powers of two.
250 *
251 * These numbers are related to the struct vs_map,
252 * defined below.
253 */
254 #define MAX_CLUSTER_SIZE 8
255 #define MAX_CLUSTER_SHIFT 3
256 #define NO_CLSIZE 0
257
258 /*
259 * bit map related macros
260 */
261 #define NBBY 8 /* bits per byte XXX */
262 #define BYTEMASK 0xff
263 #define setbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))
264 #define clrbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))
265 #define isset(a,i) (*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))
266 #define isclr(a,i) ((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)
267
268 /*
269 * Default Pager.
270 * Backing Store Management.
271 */
272
273 #define BS_MAXPRI 4
274 #define BS_MINPRI 0
275 #define BS_NOPRI -1
276 #define BS_FULLPRI -2
277
278 /*
279 * Mapping between backing store port and backing store object.
280 */
281 struct backing_store {
282 queue_chain_t bs_links; /* link in backing_store_list */
283 #ifdef MACH_KERNEL
284 mutex_t bs_lock; /* lock for the structure */
285 #else
286 struct mutex bs_lock; /* lock for the structure */
287 #endif
288 MACH_PORT_FACE bs_port; /* backing store port */
289 int bs_priority;
290 int bs_clsize; /* cluster size in pages */
291
292 /* statistics */
293 unsigned int bs_pages_free; /* # unallocated pages */
294 unsigned int bs_pages_total; /* # pages (total) */
295 unsigned int bs_pages_in; /* # page read requests */
296 unsigned int bs_pages_in_fail; /* # page read errors */
297 unsigned int bs_pages_out; /* # page write requests */
298 unsigned int bs_pages_out_fail; /* # page write errors */
299 };
300 typedef struct backing_store *backing_store_t;
301 #define BACKING_STORE_NULL ((backing_store_t) 0)
302 #define BS_STAT(bs, clause) VSTATS_ACTION(&(bs)->bs_lock, (clause))
303
304 #ifdef MACH_KERNEL
305 #define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock, ETAP_DPAGE_BS)
306 #else
307 #define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock)
308 #endif
309 #define BS_LOCK(bs) mutex_lock(&(bs)->bs_lock)
310 #define BS_UNLOCK(bs) mutex_unlock(&(bs)->bs_lock)
311
312 struct backing_store_list_head {
313 queue_head_t bsl_queue;
314 #ifdef MACH_KERNEL
315 mutex_t bsl_lock;
316 #else
317 struct mutex bsl_lock;
318 #endif
319 };
320 extern struct backing_store_list_head backing_store_list;
321 extern int backing_store_release_trigger_disable;
322
323 #ifdef MACH_KERNEL
324 #define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock, ETAP_DPAGE_BSL)
325 #else
326 #define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock)
327 #endif
328 #define BSL_LOCK() mutex_lock(&backing_store_list.bsl_lock)
329 #define BSL_UNLOCK() mutex_unlock(&backing_store_list.bsl_lock)
330
331 /*
332 * Paging segment management.
333 * Controls allocation of blocks within paging area.
334 */
335 struct paging_segment {
336 /* device management */
337 union {
338 MACH_PORT_FACE dev; /* Port to device */
339 struct vnode *vnode; /* vnode for bs file */
340 } storage_type;
341 unsigned int ps_segtype; /* file type or partition */
342 MACH_PORT_FACE ps_device; /* Port to device */
343 vm_offset_t ps_offset; /* Offset of segment within device */
344 vm_offset_t ps_recnum; /* Number of device records in segment*/
345 unsigned int ps_pgnum; /* Number of pages in segment */
346 unsigned int ps_record_shift;/* Bit shift: pages to device records */
347
348 /* clusters and pages */
349 unsigned int ps_clshift; /* Bit shift: clusters to pages */
350 unsigned int ps_ncls; /* Number of clusters in segment */
351 unsigned int ps_clcount; /* Number of free clusters */
352 unsigned int ps_pgcount; /* Number of free pages */
353 long ps_hint; /* Hint of where to look next. */
354
355 /* bitmap */
356 #ifdef MACH_KERNEL
357 mutex_t ps_lock; /* Lock for contents of struct */
358 #else
359 struct mutex ps_lock; /* Lock for contents of struct */
360 #endif
361 unsigned char *ps_bmap; /* Map of used clusters */
362
363 /* backing store */
364 backing_store_t ps_bs; /* Backing store segment belongs to */
365
366 boolean_t ps_going_away; /* Destroy attempt in progress */
367 };
368
/* Accessors for the storage_type union in struct paging_segment. */
369 #define ps_vnode storage_type.vnode
370 #define ps_device storage_type.dev
/*
 * NOTE(review): struct paging_segment above also declares a member named
 * ps_device; because this macro is defined after the struct, every later
 * textual use of "ps_device" expands to storage_type.dev, leaving that
 * struct member inaccessible by name -- confirm the member is
 * intentionally dead.
 */
371 #define PS_PARTITION 1	/* ps_segtype: raw device partition */
372 #define PS_FILE 2	/* ps_segtype: file in a filesystem */
373
374 typedef struct paging_segment *paging_segment_t;
375
376 #define PAGING_SEGMENT_NULL ((paging_segment_t) 0)
377
378 #ifdef MACH_KERNEL
379 #define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock, ETAP_DPAGE_SEGMENT)
380 #else
381 #define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock)
382 #endif
383 #define PS_LOCK(ps) mutex_lock(&(ps)->ps_lock)
384 #define PS_UNLOCK(ps) mutex_unlock(&(ps)->ps_lock)
385
386 typedef unsigned int pseg_index_t;
387
388 #define INVALID_PSEG_INDEX ((pseg_index_t)-1)
389 #define NULL_PSEG_INDEX ((pseg_index_t) 0)
390 /*
391 * MAX_PSEG_INDEX value is related to struct vs_map below.
392 * "0" is reserved for empty map entries (no segment).
393 */
394 #define MAX_PSEG_INDEX 63 /* 0 is reserved for empty map */
395 #define MAX_NUM_PAGING_SEGMENTS MAX_PSEG_INDEX
396
397 /* paging segments array */
398 extern paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS];
399 #ifdef MACH_KERNEL
400 extern mutex_t paging_segments_lock;
401 #else
402 extern struct mutex paging_segments_lock;
403 #endif
404 extern int paging_segment_count; /* number of active paging segments */
405 extern int paging_segment_max; /* highest used paging segment index */
406 extern int ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1];
407
408 #ifdef MACH_KERNEL
409 #define PSL_LOCK_INIT() mutex_init(&paging_segments_lock, ETAP_DPAGE_SEGLIST)
410 #else
411 #define PSL_LOCK_INIT() mutex_init(&paging_segments_lock)
412 #endif
413 #define PSL_LOCK() mutex_lock(&paging_segments_lock)
414 #define PSL_UNLOCK() mutex_unlock(&paging_segments_lock)
415
416 /*
417 * Vstruct manipulation. The vstruct is the pager's internal
418 * representation of vm objects it manages. There is one vstruct allocated
419 * per vm object.
420 *
421 * The following data structures are defined for vstruct and vm object
422 * management.
423 */
424
425 /*
426 * vs_map
427 * A structure used only for temporary objects. It is the element
428 * contained in the vs_clmap structure, which contains information
429 * about which clusters and pages in an object are present on backing
430 * store (a paging file).
431 * Note that this structure and its associated constants may change
432 * with minimal impact on code. The only function which knows the
433 * internals of this structure is ps_clmap().
434 *
435 * If it is necessary to change the maximum number of paging segments
436 * or pages in a cluster, then this structure is the one most
437 * affected. The constants and structures which *may* change are:
438 * MAX_CLUSTER_SIZE
439 * MAX_CLUSTER_SHIFT
440 * MAX_NUM_PAGING_SEGMENTS
441 * VSTRUCT_DEF_CLSHIFT
442 * struct vs_map and associated macros and constants (VSM_*)
443 * (only the macro definitions need change, the exported (inside the
444 * pager only) interfaces remain the same; the constants are for
445 * internal vs_map manipulation only).
446 * struct clbmap (below).
447 */
/*
 * Per-cluster backing-store map entry.  The bit-field widths are coupled
 * to constants defined earlier in this file: vsmap_entry:23 matches
 * VSM_ENTRY_NULL (0x7fffff); vsmap_psindex:8 covers MAX_PSEG_INDEX (63,
 * with index 0 reserved for "no segment"); the two 16-bit per-page
 * bitmaps bound the pages per cluster at 16 (MAX_CLUSTER_SIZE is
 * currently 8).  Only ps_clmap() should know this layout.
 */
448 struct vs_map {
449 unsigned int vsmap_entry:23, /* offset in paging segment */
450 vsmap_psindex:8, /* paging segment */
451 vsmap_error:1, /* cluster is marked in error (see VSM_SETERR) */
452 vsmap_bmap:16, /* bitmap: pages present on backing store */
453 vsmap_alloc:16; /* bitmap: pages allocated within the cluster */
454 };
455
456 typedef struct vs_map *vs_map_t;
457
458
459 #define VSM_ENTRY_NULL 0x7fffff
460
461 /*
462 * Exported macros for manipulating the vs_map structure --
463 * checking status, getting and setting bits.
464 */
465 #define VSCLSIZE(vs) (1 << (vs)->vs_clshift)
466 #define VSM_ISCLR(vsm) (((vsm).vsmap_entry == VSM_ENTRY_NULL) && \
467 ((vsm).vsmap_error == 0))
468 #define VSM_ISERR(vsm) ((vsm).vsmap_error)
469 #define VSM_SETCLOFF(vsm, val) ((vsm).vsmap_entry = (val))
470 #define VSM_SETERR(vsm, err) ((vsm).vsmap_error = 1, \
471 (vsm).vsmap_entry = (err))
472 #define VSM_GETERR(vsm) ((vsm).vsmap_entry)
473 #define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page)))
474 #define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page)))
475 #define VSM_SETPS(vsm, psindx) ((vsm).vsmap_psindex = (psindx))
476 #define VSM_PSINDEX(vsm) ((vsm).vsmap_psindex)
477 #define VSM_PS(vsm) paging_segments[(vsm).vsmap_psindex]
478 #define VSM_BMAP(vsm) ((vsm).vsmap_bmap)
479 #define VSM_CLOFF(vsm) ((vsm).vsmap_entry)
480 #define VSM_CLR(vsm) ((vsm).vsmap_entry = VSM_ENTRY_NULL, \
481 (vsm).vsmap_psindex = 0, \
482 (vsm).vsmap_error = 0, \
483 (vsm).vsmap_bmap = 0, \
484 (vsm).vsmap_alloc = 0)
485 #define VSM_ALLOC(vsm) ((vsm).vsmap_alloc)
486 #define VSM_SETALLOC(vsm, page) ((vsm).vsmap_alloc |= (1 << (page)))
487 #define VSM_CLRALLOC(vsm, page) ((vsm).vsmap_alloc &= ~(1 << (page)))
488
489 /*
490 * Constants and macros for dealing with vstruct maps,
491 * which comprise vs_map structures, which
492 * map vm objects to backing storage (paging files and clusters).
493 */
494 #define CLMAP_THRESHOLD 512 /* bytes */
495 #define CLMAP_ENTRIES (CLMAP_THRESHOLD/sizeof(struct vs_map))
496 #define CLMAP_SIZE(ncls) (ncls*sizeof(struct vs_map))
497
498 #define INDIRECT_CLMAP_ENTRIES(ncls) (((ncls-1)/CLMAP_ENTRIES) + 1)
499 #define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * sizeof(struct vs_map *))
500 #define INDIRECT_CLMAP(size) (CLMAP_SIZE(size) > CLMAP_THRESHOLD)
501
502 #define RMAPSIZE(blocks) (howmany(blocks,NBBY))
503
504 #define CL_FIND 1
505 #define CL_ALLOC 2
506
507 /*
508 * clmap
509 *
510 * A cluster map returned by ps_clmap. It is an abstracted cluster of
511 * pages. It gives the caller information about the cluster
512 * desired. On read it tells the caller if a cluster is mapped, and if so,
513 * which of its pages are valid. It should not be referenced directly,
514 * except by ps_clmap; macros should be used. If the number of pages
515 * in a cluster needs to be more than 32, then the struct clbmap must
516 * become larger.
517 */
518 struct clbmap {
519 unsigned int clb_map;
520 };
521
522 struct clmap {
523 paging_segment_t cl_ps; /* paging segment backing cluster */
524 int cl_numpages; /* number of valid pages */
525 struct clbmap cl_bmap; /* map of pages in cluster */
526 int cl_error; /* cluster error value */
527 struct clbmap cl_alloc; /* map of allocated pages in cluster */
528 };
529
530 #define CLMAP_ERROR(clm) (clm).cl_error
531 #define CLMAP_PS(clm) (clm).cl_ps
532 #define CLMAP_NPGS(clm) (clm).cl_numpages
533 #define CLMAP_ISSET(clm,i) ((1<<(i))&((clm).cl_bmap.clb_map))
534 #define CLMAP_ALLOC(clm) (clm).cl_alloc.clb_map
535 /*
536 * Shift off unused bits in a partial cluster
537 */
538 #define CLMAP_SHIFT(clm,vs) \
539 (clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
540 #define CLMAP_SHIFTALLOC(clm,vs) \
541 (clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
542
543 typedef struct vstruct_alias {
544 int *name;
545 struct vstruct *vs;
546 } vstruct_alias_t;
547
548 #ifdef MACH_KERNEL
549 #define DPT_LOCK_INIT(lock) mutex_init(&(lock), ETAP_DPAGE_VSTRUCT)
550 #define DPT_LOCK(lock) mutex_lock(&(lock))
551 #define DPT_UNLOCK(lock) mutex_unlock(&(lock))
552 #define DPT_SLEEP(lock, e, i) thread_sleep_mutex(&(lock), (event_t)(e), i)
553 #define VS_LOCK_TYPE hw_lock_data_t
554 #define VS_LOCK_INIT(vs) hw_lock_init(&(vs)->vs_lock)
555 #define VS_TRY_LOCK(vs) (VS_LOCK(vs),TRUE)
556 #define VS_LOCK(vs) hw_lock_lock(&(vs)->vs_lock)
557 #define VS_UNLOCK(vs) hw_lock_unlock(&(vs)->vs_lock)
558 #define VS_MAP_LOCK_TYPE mutex_t
559 #define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock, ETAP_DPAGE_VSMAP)
560 #define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
561 #define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
562 #define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
563 #else
564 #define VS_LOCK_TYPE struct mutex
565 #define VS_LOCK_INIT(vs) mutex_init(&(vs)->vs_lock, ETAP_DPAGE_VSTRUCT)
566 #define VS_TRY_LOCK(vs) mutex_try(&(vs)->vs_lock)
567 #define VS_LOCK(vs) mutex_lock(&(vs)->vs_lock)
568 #define VS_UNLOCK(vs) mutex_unlock(&(vs)->vs_lock)
569 #define VS_MAP_LOCK_TYPE struct mutex
570 #define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock)
571 #define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
572 #define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
573 #define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
574 #endif
575
576
577 /*
578 * VM Object Structure: This is the structure used to manage
579 * default pager object associations with their control counter-
580 * parts (VM objects).
581 */
582 typedef struct vstruct {
583 int *vs_mem_obj; /* our memory obj - temp */
584 int vs_mem_obj_ikot;/* JMM:fake ip_kotype() */
585 memory_object_control_t vs_control; /* our mem obj control ref */
586 VS_LOCK_TYPE vs_lock; /* data for the lock */
587
588 /* JMM - Could combine these first two in a single pending count now */
589 unsigned int vs_next_seqno; /* next sequence num to issue */
590 unsigned int vs_seqno; /* Pager port sequence number */
591 unsigned int vs_readers; /* Reads in progress */
592 unsigned int vs_writers; /* Writes in progress */
593
594 #ifdef MACH_KERNEL
595 int
596 /* boolean_t */ vs_waiting_seqno:1, /* to wait on seqno */
597 /* boolean_t */ vs_waiting_read:1, /* waiting on reader? */
598 /* boolean_t */ vs_waiting_write:1, /* waiting on writer? */
599 /* boolean_t */ vs_waiting_async:1, /* waiting on async? */
600 /* boolean_t */ vs_indirect:1, /* map indirect? */
601 /* boolean_t */ vs_xfer_pending:1; /* xfer out of seg? */
602 #else
603 event_t vs_waiting_seqno;/* to wait on seqno */
604 event_t vs_waiting_read; /* to wait on readers */
605 event_t vs_waiting_write;/* to wait on writers */
606 event_t vs_waiting_async;/* to wait on async_pending */
607 int vs_indirect:1, /* Is the map indirect ? */
608 vs_xfer_pending:1; /* xfering out of a seg ? */
609 #endif
610
611 unsigned int vs_async_pending;/* pending async write count */
612 unsigned int vs_errors; /* Pageout error count */
613 unsigned int vs_references; /* references */
614
615 queue_chain_t vs_links; /* Link in pager-wide list */
616
617 int vs_clshift; /* Bit shift: clusters->pages */
618 int vs_size; /* Object size in clusters */
619 #ifdef MACH_KERNEL
620 mutex_t vs_map_lock; /* to protect map below */
621 #else
622 struct mutex vs_map_lock; /* to protect map below */
623 #endif
624 union {
625 struct vs_map *vsu_dmap; /* Direct map of clusters */
626 struct vs_map **vsu_imap; /* Indirect map of clusters */
627 } vs_un;
628 } *vstruct_t;
629
630 #define vs_dmap vs_un.vsu_dmap
631 #define vs_imap vs_un.vsu_imap
632
633 #define VSTRUCT_NULL ((vstruct_t) 0)
634
635 __private_extern__ void vs_async_wait(vstruct_t);
636
637 #if PARALLEL
638 __private_extern__ void vs_lock(vstruct_t);
639 __private_extern__ void vs_unlock(vstruct_t);
640 __private_extern__ void vs_start_read(vstruct_t);
641 __private_extern__ void vs_finish_read(vstruct_t);
642 __private_extern__ void vs_wait_for_readers(vstruct_t);
643 __private_extern__ void vs_start_write(vstruct_t);
644 __private_extern__ void vs_finish_write(vstruct_t);
645 __private_extern__ void vs_wait_for_writers(vstruct_t);
646 #else /* PARALLEL */
647 #define vs_lock(vs)
648 #define vs_unlock(vs)
649 #define vs_start_read(vs)
650 #define vs_wait_for_readers(vs)
651 #define vs_finish_read(vs)
652 #define vs_start_write(vs)
653 #define vs_wait_for_writers(vs)
654 #define vs_wait_for_sync_writers(vs)
655 #define vs_finish_write(vs)
656 #endif /* PARALLEL */
657
658 /*
659 * Data structures and variables dealing with asynchronous
660 * completion of paging operations.
661 */
662 /*
663 * vs_async
664 * A structure passed to ps_write_device for asynchronous completions.
665 * It contains enough information to complete the write and
666 * inform the VM of its completion.
667 */
668 struct vs_async {
669 struct vs_async *vsa_next; /* pointer to next structure */
670 vstruct_t vsa_vs; /* the vstruct for the object */
671 vm_offset_t vsa_addr; /* the vaddr of the data moved */
672 vm_offset_t vsa_offset; /* the object offset of the data */
673 vm_size_t vsa_size; /* the number of bytes moved */
674 paging_segment_t vsa_ps; /* the paging segment used */
675 int vsa_flags; /* flags */
676 int vsa_error; /* error, if there is one */
677 mutex_t vsa_lock;
678 MACH_PORT_FACE reply_port; /* associated reply port */
679 };
680
681 /*
682 * flags values.
683 */
684 #define VSA_READ 0x0001
685 #define VSA_WRITE 0x0002
686 #define VSA_TRANSFER 0x0004
687
688 /*
689 * List of all vstructs. A specific vstruct is
690 * found directly via its port, this list is
691 * only used for monitoring purposes by the
692 * default_pager_object* calls
693 */
694 struct vstruct_list_head {
695 queue_head_t vsl_queue;
696 #ifdef MACH_KERNEL
697 mutex_t vsl_lock;
698 #else
699 struct mutex vsl_lock;
700 #endif
701 int vsl_count; /* saves code */
702 };
703
704 __private_extern__ struct vstruct_list_head vstruct_list;
705
706 __private_extern__ void vstruct_list_insert(vstruct_t vs);
707 __private_extern__ void vstruct_list_delete(vstruct_t vs);
708
709
710 #ifdef MACH_KERNEL
711 #define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock, ETAP_DPAGE_VSLIST)
712 #else
713 #define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock)
714 #endif
715 #define VSL_LOCK() mutex_lock(&vstruct_list.vsl_lock)
716 #define VSL_LOCK_TRY() mutex_try(&vstruct_list.vsl_lock)
717 #define VSL_UNLOCK() mutex_unlock(&vstruct_list.vsl_lock)
718 #define VSL_SLEEP(e,i) thread_sleep_mutex((e), &vstruct_list.vsl_lock, (i))
719
720 #ifdef MACH_KERNEL
721 __private_extern__ zone_t vstruct_zone;
722 #endif
723
724 /*
725 * Create port alias for vstruct address.
726 *
727 * We assume that the last two bits of a vstruct address will be zero due to
728 * memory allocation restrictions, hence are available for use as a sanity
729 * check.
730 */
731 #ifdef MACH_KERNEL
732
733 #define ISVS ((int *)123456)
734 #define mem_obj_is_vs(_mem_obj_) \
735 (((_mem_obj_) != NULL) && ((_mem_obj_)->pager == ISVS))
736 #define mem_obj_to_vs(_mem_obj_) \
737 ((vstruct_t)(_mem_obj_))
738 #define vs_to_mem_obj(_vs_) ((memory_object_t)(_vs_))
739 #define vs_lookup(_mem_obj_, _vs_) \
740 do { \
741 if (!mem_obj_is_vs(_mem_obj_)) \
742 panic("bad dp memory object"); \
743 _vs_ = mem_obj_to_vs(_mem_obj_); \
744 } while (0)
745 #define vs_lookup_safe(_mem_obj_, _vs_) \
746 do { \
747 if (!mem_obj_is_vs(_mem_obj_)) \
748 _vs_ = VSTRUCT_NULL; \
749 else \
750 _vs_ = mem_obj_to_vs(_mem_obj_); \
751 } while (0)
752 #else
753
754 #define vs_to_port(_vs_) (((vm_offset_t)(_vs_))+1)
755 #define port_to_vs(_port_) ((vstruct_t)(((vm_offset_t)(_port_))&~3))
756 #define port_is_vs(_port_) ((((vm_offset_t)(_port_))&3) == 1)
757
758 #define vs_lookup(_port_, _vs_) \
759 do { \
760 if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_) \
761 || port_to_vs(_port_)->vs_mem_obj != (_port_)) \
762 Panic("bad pager port"); \
763 _vs_ = port_to_vs(_port_); \
764 } while (0)
765 #endif
766
767 /*
768 * Cross-module routines declaration.
769 */
770 #ifndef MACH_KERNEL
771 extern int dp_thread_id(void);
772 #endif
773 extern boolean_t device_reply_server(mach_msg_header_t *,
774 mach_msg_header_t *);
775 #ifdef MACH_KERNEL
776 extern boolean_t default_pager_no_senders(memory_object_t,
777 mach_port_mscount_t);
778 #else
779 extern void default_pager_no_senders(memory_object_t,
780 mach_port_seqno_t,
781 mach_port_mscount_t);
782 #endif
783
784 extern int local_log2(unsigned int);
785 extern void bs_initialize(void);
786 extern void bs_global_info(vm_size_t *,
787 vm_size_t *);
788 extern boolean_t bs_add_device(char *,
789 MACH_PORT_FACE);
790 extern vstruct_t ps_vstruct_create(vm_size_t);
791 extern void ps_vstruct_dealloc(vstruct_t);
792 extern kern_return_t pvs_cluster_read(vstruct_t,
793 vm_offset_t,
794 vm_size_t);
795 extern kern_return_t vs_cluster_write(vstruct_t,
796 upl_t,
797 vm_offset_t,
798 vm_size_t,
799 boolean_t,
800 int);
801 extern vm_offset_t ps_clmap(vstruct_t,
802 vm_offset_t,
803 struct clmap *,
804 int,
805 vm_size_t,
806 int);
807 extern vm_size_t ps_vstruct_allocated_size(vstruct_t);
808 extern size_t ps_vstruct_allocated_pages(vstruct_t,
809 default_pager_page_t *,
810 size_t);
811 extern boolean_t bs_set_default_clsize(unsigned int);
812
813 extern boolean_t verbose;
814
815 #endif /* _DEFAULT_PAGER_INTERNAL_H_ */