]> git.saurik.com Git - apple/xnu.git/blob - osfmk/default_pager/default_pager_internal.h
a2ac7d6106d2b5f74e9bd8911fd65b2d81294f49
[apple/xnu.git] / osfmk / default_pager / default_pager_internal.h
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50
51 /*
52 * Default pager.
53 * General definitions.
54 */
55
56 #ifndef _DEFAULT_PAGER_INTERNAL_H_
57 #define _DEFAULT_PAGER_INTERNAL_H_
58
59 #include <default_pager/diag.h>
60 #include <default_pager/default_pager_types.h>
61 #include <mach/mach_types.h>
62 #include <ipc/ipc_port.h>
63 #include <ipc/ipc_types.h>
64 #include <ipc/ipc_space.h>
65 #include <kern/lock.h>
66 #include <kern/kalloc.h>
67 #include <kern/thread.h>
68 #include <vm/vm_kern.h>
69 #include <device/device_types.h>
70
71 /*
72 * Default option settings.
73 */
74 #ifndef PARALLEL
75 #define PARALLEL 1
76 #endif
77
78 #ifndef CHECKSUM
79 #define CHECKSUM 0
80 #endif
81
82 #define MACH_PORT_FACE mach_port_t
83
84 #ifndef USE_PRECIOUS
85 #define USE_PRECIOUS TRUE
86 #endif
87
88 #ifdef USER_PAGER
89 #define UP(stuff) stuff
90 #else /* USER_PAGER */
91 #define UP(stuff)
92 #endif /* USER_PAGER */
93
extern int norma_mk;	/* is the kernel configured with NORMA ? */

#ifndef MACH_KERNEL
/* Serializes dprintf output across the pager's server threads. */
extern struct mutex dprintf_lock;
#define PRINTF_LOCK_INIT() mutex_init(&dprintf_lock)
#define PRINTF_LOCK() mutex_lock(&dprintf_lock)
#define PRINTF_UNLOCK() mutex_unlock(&dprintf_lock)
#endif

/*
 * dprintf(args): debug printf.
 * "args" must be a fully parenthesized printf argument list,
 * e.g. dprintf(("x = %d\n", x)).  Output is prefixed with the
 * pager name and, user-side, the calling thread id; both variants
 * rely on the per-function "here" string naming the caller
 * (this module's convention).
 */
#ifndef MACH_KERNEL
#define dprintf(args) \
	do { \
		PRINTF_LOCK(); \
		printf("%s[%d]%s: ", my_name, dp_thread_id(), here); \
		printf args; \
		PRINTF_UNLOCK(); \
	} while (0)
#else
#define dprintf(args) \
	do { \
		printf("%s[KERNEL:]%s: ", my_name, here); \
		printf args; \
	} while (0)
#endif
118
119 /*
120 * Debug.
121 */
122 extern char my_name[];
123
124 #define DEFAULT_PAGER_DEBUG 0
125
126 #if DEFAULT_PAGER_DEBUG
127
128 extern int debug_mask;
129 #define DEBUG_MSG_EXTERNAL 0x00000001
130 #define DEBUG_MSG_INTERNAL 0x00000002
131 #define DEBUG_MO_EXTERNAL 0x00000100
132 #define DEBUG_MO_INTERNAL 0x00000200
133 #define DEBUG_VS_EXTERNAL 0x00010000
134 #define DEBUG_VS_INTERNAL 0x00020000
135 #define DEBUG_BS_EXTERNAL 0x01000000
136 #define DEBUG_BS_INTERNAL 0x02000000
137
138 #define DEBUG(level, args) \
139 do { \
140 if (debug_mask & (level)) \
141 dprintf(args); \
142 } while (0)
143
/*
 * ASSERT(expr): panic if "expr" is false, reporting file, line and
 * the failed expression.  Relies on the per-function "here" string
 * (this module's convention).
 *
 * The MACH_KERNEL conditional must wrap the entire macro definition:
 * preprocessor directives may not appear inside a macro replacement
 * list.  The previous form spliced "#ifndef MACH_KERNEL" into the
 * macro body through the trailing backslashes, which is ill-formed
 * and would fail to compile as soon as DEFAULT_PAGER_DEBUG is
 * enabled (it went unnoticed only because this group is normally
 * skipped).
 */
#ifndef MACH_KERNEL
#define ASSERT(expr) \
	do { \
		if (!(expr)) \
			panic("%s[%d]%s: assertion failed in %s line %d: %s",\
				my_name, dp_thread_id(), here, \
				__FILE__, __LINE__, # expr); \
	} while (0)
#else /* MACH_KERNEL */
#define ASSERT(expr) \
	do { \
		if (!(expr)) \
			panic("%s[KERNEL]%s: assertion failed in %s line %d: %s",\
				my_name, here, __FILE__, __LINE__, # expr); \
	} while (0)
#endif /* MACH_KERNEL */
156
#else /* DEFAULT_PAGER_DEBUG */

/*
 * Debugging disabled: DEBUG and ASSERT compile to a harmless
 * self-assignment.  The "here[0] = here[0]" form still references
 * the per-function "here" string, silencing unused-variable
 * warnings in functions that declare it only for diagnostics.
 */
#define DEBUG(level, args) here[0] = here[0]
#define ASSERT(clause) here[0] = here[0]

#endif /* DEFAULT_PAGER_DEBUG */
163
#ifndef MACH_KERNEL
extern char *mach_error_string(kern_return_t);
#endif

/* NOTE: macro evaluates both arguments twice; avoid side effects. */
#define MIN(a,b) (((a) < (b)) ? (a) : (b))

/* Completion codes reported for pagein/pageout requests. */
#define PAGER_SUCCESS 0		/* page read or written successfully */
#define PAGER_FULL 1		/* no backing store space available */
#define PAGER_ERROR 2		/* device or internal error */
173
/*
 * VM and IPC globals.
 * In-kernel builds reuse the kernel's page_size; the user-level
 * pager keeps its own copy.
 */
#ifdef MACH_KERNEL
#define vm_page_size page_size
extern vm_size_t page_size;
#else
extern vm_object_size_t vm_page_size;
#endif
extern unsigned long long vm_page_mask;	/* vm_page_size - 1 presumably — TODO confirm */
extern int vm_page_shift;		/* log2(vm_page_size) presumably — TODO confirm */

#ifndef MACH_KERNEL
/* Page <-> byte address conversions (kernel provides its own). */
#define ptoa(p) ((p)*vm_page_size)
#define atop(a) ((a)/vm_page_size)
#endif
/* Ceiling division: number of b-sized units needed to cover a. */
#define howmany(a,b) (((a) + (b) - 1)/(b))
191
/*
 * Well-known ports and global state of the pager.  The kernel
 * variant runs without a task of its own, so default_pager_self
 * is deliberately absent there.
 */
#ifdef MACH_KERNEL
extern mutex_t dpt_lock;	/* Lock for the dpt array */
extern unsigned int default_pager_internal_count;
extern MACH_PORT_FACE default_pager_host_port;
/* extern task_t default_pager_self; */ /* dont need or want */
extern MACH_PORT_FACE default_pager_internal_set;
extern MACH_PORT_FACE default_pager_external_set;
extern MACH_PORT_FACE default_pager_default_port;
extern MACH_PORT_FACE default_pager_default_set;
#else
extern mach_port_t default_pager_host_port;
extern task_port_t default_pager_self;
extern mach_port_t default_pager_internal_set;
extern mach_port_t default_pager_external_set;
extern mach_port_t default_pager_default_port;
extern mach_port_t default_pager_default_set;
#endif

/*
 * Per-service-thread state.  User-side each entry owns a cthread;
 * kernel-side entries live in dpt_array and are checked in/out.
 */
typedef struct default_pager_thread {
#ifndef MACH_KERNEL
	cthread_t dpt_thread;	/* Server thread. */
#endif
	vm_offset_t dpt_buffer;	/* Read buffer. */
	boolean_t dpt_internal;	/* Do we handle internal objects? */
#ifndef MACH_KERNEL
	int dpt_id;		/* thread id for printf */
#else
	int checked_out;	/* non-zero while entry is in use */
#endif
	boolean_t dpt_initialized_p;	/* Thread is ready for requests. */
} default_pager_thread_t;

#ifdef MACH_KERNEL
extern default_pager_thread_t **dpt_array;	/* guarded by dpt_lock */
#endif
227
/*
 * Global statistics.
 *
 * NOTE(review): this object is *defined* (not merely declared) in a
 * header, so every translation unit that includes this file emits a
 * tentative definition and the build relies on the linker merging
 * common symbols.  Modern toolchains (-fno-common) would reject
 * this; declaring it extern here and defining it once in a .c file
 * would be the conventional fix — left unchanged to preserve layout.
 */
struct {
	unsigned int gs_pageout_calls;		/* # pageout calls */
	unsigned int gs_pagein_calls;		/* # pagein calls */
	unsigned int gs_pages_in;		/* # pages paged in (total) */
	unsigned int gs_pages_out;		/* # pages paged out (total) */
	unsigned int gs_pages_unavail;		/* # zero-fill pages */
	unsigned int gs_pages_init;		/* # page init requests */
	unsigned int gs_pages_init_writes;	/* # page init writes */
	VSTATS_LOCK_DECL(gs_lock)
} global_stats;
/* Execute "clause" under the statistics lock. */
#define GSTAT(clause) VSTATS_ACTION(&global_stats.gs_lock, (clause))
242
/*
 * Cluster related definitions.
 * Clusters are sized in number of pages per cluster.
 * Cluster sizes must be powers of two.
 *
 * These numbers are related to the struct vs_map,
 * defined below.
 */
#define MAX_CLUSTER_SIZE 8	/* pages per cluster, upper bound */
#define MAX_CLUSTER_SHIFT 3	/* log2(MAX_CLUSTER_SIZE) */
#define NO_CLSIZE 0		/* cluster size not yet chosen */

/*
 * bit map related macros
 * Operate on an arbitrary byte array "a" addressed by bit index "i".
 * NOTE: each macro evaluates "i" more than once; avoid side effects.
 */
#define NBBY 8	/* bits per byte XXX */
#define BYTEMASK 0xff
#define setbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))
#define clrbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))
#define isset(a,i) (*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))
#define isclr(a,i) ((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)
264
/*
 * Default Pager.
 * Backing Store Management.
 */

/* Backing store priority range; higher values are preferred. */
#define BS_MAXPRI 4
#define BS_MINPRI 0
#define BS_NOPRI -1	/* store has no priority assigned */
#define BS_FULLPRI -2	/* store is full; do not allocate from it */

/*
 * Mapping between backing store port and backing store object.
 */
struct backing_store {
	queue_chain_t bs_links;		/* link in backing_store_list */
#ifdef MACH_KERNEL
	mutex_t bs_lock;		/* lock for the structure */
#else
	struct mutex bs_lock;		/* lock for the structure */
#endif
	MACH_PORT_FACE bs_port;		/* backing store port */
	int bs_priority;		/* allocation priority (BS_* above) */
	int bs_clsize;			/* cluster size in pages */

	/* statistics */
	unsigned int bs_pages_free;	/* # unallocated pages */
	unsigned int bs_pages_total;	/* # pages (total) */
	unsigned int bs_pages_in;	/* # page read requests */
	unsigned int bs_pages_in_fail;	/* # page read errors */
	unsigned int bs_pages_out;	/* # page write requests */
	unsigned int bs_pages_out_fail;	/* # page write errors */
};
typedef struct backing_store *backing_store_t;
#define BACKING_STORE_NULL ((backing_store_t) 0)
/* Update statistics of "bs" under its lock. */
#define BS_STAT(bs, clause) VSTATS_ACTION(&(bs)->bs_lock, (clause))

/* Kernel mutex_init takes an ETAP event id; user-level does not. */
#ifdef MACH_KERNEL
#define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock, ETAP_DPAGE_BS)
#else
#define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock)
#endif
#define BS_LOCK(bs) mutex_lock(&(bs)->bs_lock)
#define BS_UNLOCK(bs) mutex_unlock(&(bs)->bs_lock)
308
/* Head of the global list of backing stores, with its lock. */
struct backing_store_list_head {
	queue_head_t bsl_queue;	/* queue of struct backing_store */
#ifdef MACH_KERNEL
	mutex_t bsl_lock;
#else
	struct mutex bsl_lock;
#endif
};
extern struct backing_store_list_head backing_store_list;

#ifdef MACH_KERNEL
#define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock, ETAP_DPAGE_BSL)
#else
#define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock)
#endif
#define BSL_LOCK() mutex_lock(&backing_store_list.bsl_lock)
#define BSL_UNLOCK() mutex_unlock(&backing_store_list.bsl_lock)
326
/*
 * Paging segment management.
 * Controls allocation of blocks within paging area.
 * A paging segment is either a raw device/partition or a file,
 * divided into clusters tracked by a usage bitmap.
 */
struct paging_segment {
	/* device management */
	union {
		MACH_PORT_FACE dev;	/* Port to device */
		struct vnode *vnode;	/* vnode for bs file */
	} storage_type;
	unsigned int ps_segtype;	/* file type or partition */
	/*
	 * NOTE(review): this member is shadowed by the
	 * "#define ps_device storage_type.dev" alias below, so any
	 * code compiled after this header accesses the union, never
	 * this field.  Looks redundant; left in place to preserve
	 * the structure layout — confirm before removing.
	 */
	MACH_PORT_FACE ps_device;	/* Port to device */
	vm_offset_t ps_offset;		/* Offset of segment within device */
	vm_offset_t ps_recnum;		/* Number of device records in segment*/
	unsigned int ps_pgnum;		/* Number of pages in segment */
	unsigned int ps_record_shift;	/* Bit shift: pages to device records */

	/* clusters and pages */
	unsigned int ps_clshift;	/* Bit shift: clusters to pages */
	unsigned int ps_ncls;		/* Number of clusters in segment */
	unsigned int ps_clcount;	/* Number of free clusters */
	unsigned int ps_pgcount;	/* Number of free pages */
	long ps_hint;			/* Hint of where to look next. */

	/* bitmap */
#ifdef MACH_KERNEL
	mutex_t ps_lock;		/* Lock for contents of struct */
#else
	struct mutex ps_lock;		/* Lock for contents of struct */
#endif
	unsigned char *ps_bmap;		/* Map of used clusters */

	/* backing store */
	backing_store_t ps_bs;		/* Backing store segment belongs to */

	boolean_t ps_going_away;	/* Destroy attempt in progress */
};

/* Accessors for the storage_type union, keyed by ps_segtype. */
#define ps_vnode storage_type.vnode
#define ps_device storage_type.dev
#define PS_PARTITION 1
#define PS_FILE 2

typedef struct paging_segment *paging_segment_t;

#define PAGING_SEGMENT_NULL ((paging_segment_t) 0)

/* Kernel mutex_init takes an ETAP event id; user-level does not. */
#ifdef MACH_KERNEL
#define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock, ETAP_DPAGE_SEGMENT)
#else
#define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock)
#endif
#define PS_LOCK(ps) mutex_lock(&(ps)->ps_lock)
#define PS_UNLOCK(ps) mutex_unlock(&(ps)->ps_lock)
381
/* Index into the paging_segments[] array below. */
typedef unsigned int pseg_index_t;

#define INVALID_PSEG_INDEX ((pseg_index_t)-1)
#define NULL_PSEG_INDEX ((pseg_index_t) 0)
/*
 * MAX_PSEG_INDEX value is related to struct vs_map below.
 * "0" is reserved for empty map entries (no segment).
 */
#define MAX_PSEG_INDEX 63	/* 0 is reserved for empty map */
#define MAX_NUM_PAGING_SEGMENTS MAX_PSEG_INDEX

/* paging segments array; guarded by paging_segments_lock */
extern paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS];
#ifdef MACH_KERNEL
extern mutex_t paging_segments_lock;
#else
extern struct mutex paging_segments_lock;
#endif
extern int paging_segment_count;	/* number of active paging segments */
extern int paging_segment_max;		/* highest used paging segment index */
/* Per-priority rotor used to spread allocations across segments. */
extern int ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1];

#ifdef MACH_KERNEL
#define PSL_LOCK_INIT() mutex_init(&paging_segments_lock, ETAP_DPAGE_SEGLIST)
#else
#define PSL_LOCK_INIT() mutex_init(&paging_segments_lock)
#endif
#define PSL_LOCK() mutex_lock(&paging_segments_lock)
#define PSL_UNLOCK() mutex_unlock(&paging_segments_lock)
411
412 /*
413 * Vstruct manipulation. The vstruct is the pager's internal
414 * representation of vm objects it manages. There is one vstruct allocated
415 * per vm object.
416 *
417 * The following data structures are defined for vstruct and vm object
418 * management.
419 */
420
421 /*
422 * vs_map
423 * A structure used only for temporary objects. It is the element
424 * contained in the vs_clmap structure, which contains information
425 * about which clusters and pages in an object are present on backing
426 * store (a paging file).
427 * Note that this structure and its associated constants may change
428 * with minimal impact on code. The only function which knows the
429 * internals of this structure is ps_clmap().
430 *
431 * If it is necessary to change the maximum number of paging segments
432 * or pages in a cluster, then this structure is the one most
433 * affected. The constants and structures which *may* change are:
434 * MAX_CLUSTER_SIZE
435 * MAX_CLUSTER_SHIFT
436 * MAX_NUM_PAGING_SEGMENTS
437 * VSTRUCT_DEF_CLSHIFT
438 * struct vs_map and associated macros and constants (VSM_*)
439 * (only the macro definitions need change, the exported (inside the
440 * pager only) interfaces remain the same; the constants are for
441 * internal vs_map manipulation only).
442 * struct clbmap (below).
443 */
/*
 * One cluster's mapping to backing store.  The first three
 * bitfields (23+8+1 = 32 bits) identify the cluster's location and
 * error state; the two 16-bit maps track which pages within the
 * cluster are written and allocated.  Field packing is
 * implementation-defined; manipulate only via the VSM_* macros.
 */
struct vs_map {
	unsigned int	vsmap_entry:23,		/* offset in paging segment */
			vsmap_psindex:8,	/* paging segment */
			vsmap_error:1,		/* entry holds an error code, not an offset */
			vsmap_bmap:16,		/* bitmap of pages written to backing store */
			vsmap_alloc:16;		/* bitmap of pages allocated in the cluster */
};

typedef struct vs_map *vs_map_t;


/* Sentinel vsmap_entry value: no cluster assigned (all 23 bits set). */
#define VSM_ENTRY_NULL 0x7fffff
456
/*
 * Exported macros for manipulating the vs_map structure --
 * checking status, getting and setting bits.
 * All take the structure by value ("vsm"), not a pointer.
 * Note VSM_SETERR reuses vsmap_entry to store the error code,
 * so VSM_GETERR/VSM_CLOFF are only meaningful depending on
 * vsmap_error.
 */
#define VSCLSIZE(vs) (1 << (vs)->vs_clshift)	/* pages per cluster of "vs" */
#define VSM_ISCLR(vsm) (((vsm).vsmap_entry == VSM_ENTRY_NULL) && \
			((vsm).vsmap_error == 0))
#define VSM_ISERR(vsm) ((vsm).vsmap_error)
#define VSM_SETCLOFF(vsm, val) ((vsm).vsmap_entry = (val))
#define VSM_SETERR(vsm, err) ((vsm).vsmap_error = 1, \
			(vsm).vsmap_entry = (err))
#define VSM_GETERR(vsm) ((vsm).vsmap_entry)
#define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page)))
#define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page)))
#define VSM_SETPS(vsm, psindx) ((vsm).vsmap_psindex = (psindx))
#define VSM_PSINDEX(vsm) ((vsm).vsmap_psindex)
#define VSM_PS(vsm) paging_segments[(vsm).vsmap_psindex]
#define VSM_BMAP(vsm) ((vsm).vsmap_bmap)
#define VSM_CLOFF(vsm) ((vsm).vsmap_entry)
/* Reset every field to the "no cluster" state. */
#define VSM_CLR(vsm) ((vsm).vsmap_entry = VSM_ENTRY_NULL, \
			(vsm).vsmap_psindex = 0, \
			(vsm).vsmap_error = 0, \
			(vsm).vsmap_bmap = 0, \
			(vsm).vsmap_alloc = 0)
#define VSM_ALLOC(vsm) ((vsm).vsmap_alloc)
#define VSM_SETALLOC(vsm, page) ((vsm).vsmap_alloc |= (1 << (page)))
#define VSM_CLRALLOC(vsm, page) ((vsm).vsmap_alloc &= ~(1 << (page)))
484
/*
 * Constants and macros for dealing with vstruct maps,
 * which comprise vs_map structures, which
 * map vm objects to backing storage (paging files and clusters).
 * Maps up to CLMAP_THRESHOLD bytes are stored directly; larger
 * objects use an indirect table of pointers to direct maps.
 */
#define CLMAP_THRESHOLD 512	/* bytes */
#define CLMAP_ENTRIES (CLMAP_THRESHOLD/sizeof(struct vs_map))
#define CLMAP_SIZE(ncls) (ncls*sizeof(struct vs_map))

#define INDIRECT_CLMAP_ENTRIES(ncls) (((ncls-1)/CLMAP_ENTRIES) + 1)
#define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * sizeof(struct vs_map *))
/* True when "size" clusters require the indirect representation. */
#define INDIRECT_CLMAP(size) (CLMAP_SIZE(size) > CLMAP_THRESHOLD)

/* Bytes needed for a bitmap of "blocks" bits. */
#define RMAPSIZE(blocks) (howmany(blocks,NBBY))

/* ps_clmap() operation selectors. */
#define CL_FIND 1	/* look up existing mapping only */
#define CL_ALLOC 2	/* allocate backing store if absent */
502
503 /*
504 * clmap
505 *
506 * A cluster map returned by ps_clmap. It is an abstracted cluster of
507 * pages. It gives the caller information about the cluster
508 * desired. On read it tells the caller if a cluster is mapped, and if so,
509 * which of its pages are valid. It should not be referenced directly,
510 * except by ps_clmap; macros should be used. If the number of pages
511 * in a cluster needs to be more than 32, then the struct clbmap must
512 * become larger.
513 */
/* One bit per page in a cluster; widen if clusters exceed 32 pages. */
struct clbmap {
	unsigned int clb_map;
};

/* Result of ps_clmap(); access via the CLMAP_* macros below. */
struct clmap {
	paging_segment_t cl_ps;		/* paging segment backing cluster */
	int cl_numpages;		/* number of valid pages */
	struct clbmap cl_bmap;		/* map of pages in cluster */
	int cl_error;			/* cluster error value */
	struct clbmap cl_alloc;		/* map of allocated pages in cluster */
};

#define CLMAP_ERROR(clm) (clm).cl_error
#define CLMAP_PS(clm) (clm).cl_ps
#define CLMAP_NPGS(clm) (clm).cl_numpages
#define CLMAP_ISSET(clm,i) ((1<<(i))&((clm).cl_bmap.clb_map))
#define CLMAP_ALLOC(clm) (clm).cl_alloc.clb_map
/*
 * Shift off unused bits in a partial cluster
 * (these two take a pointer, unlike the accessors above).
 */
#define CLMAP_SHIFT(clm,vs) \
	(clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
#define CLMAP_SHIFTALLOC(clm,vs) \
	(clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
538
/*
 * Stored in a pager port's "alias" field to associate the port
 * with its vstruct; "name" holds the ISVS magic as a sanity check
 * (see port_is_vs / vs_lookup below).
 */
typedef struct vstruct_alias {
	vm_offset_t name;	/* sanity-check magic (ISVS) */
	struct vstruct *vs;	/* the vstruct this port represents */
} vstruct_alias_t;
543
/*
 * VM Object Structure: This is the structure used to manage pagers associated
 * to VM objects.  Mapping between pager port and paging object.
 * One vstruct is allocated per vm object the pager manages.
 */

typedef struct vstruct {
	queue_chain_t vs_links;		/* Link in pager-port list */
#ifdef MACH_KERNEL
	hw_lock_data_t vs_lock;		/* Lock for the structure */
#else
	struct mutex vs_lock;		/* Lock for the structure */
#endif
	MACH_PORT_FACE vs_mem_obj_port;	/* Memory object port */
	mach_port_seqno_t vs_next_seqno;/* next sequence number to issue */
	mach_port_seqno_t vs_seqno;	/* Pager port sequence number */
	MACH_PORT_FACE vs_control_port;	/* Memory object's control port */
	mach_port_urefs_t vs_control_refs;	/* Mem object's control port refs */
	MACH_PORT_FACE vs_object_name;	/* Name port */
	mach_port_urefs_t vs_name_refs;	/* Name port user-refs */

	/*
	 * Wait channels: in-kernel these are booleans flagging that
	 * someone is waiting; user-side they are event objects.
	 */
#ifdef MACH_KERNEL
	boolean_t vs_waiting_seqno;	/* to wait on seqno */
	boolean_t vs_waiting_read;	/* to wait on readers */
	boolean_t vs_waiting_write;	/* to wait on writers */
	boolean_t vs_waiting_refs;	/* to wait on refs */
	boolean_t vs_waiting_async;	/* to wait on async_pending */
#else
	event_t vs_waiting_seqno;	/* to wait on seqno */
	event_t vs_waiting_read;	/* to wait on readers */
	event_t vs_waiting_write;	/* to wait on writers */
	event_t vs_waiting_refs;	/* to wait on refs */
	event_t vs_waiting_async;	/* to wait on async_pending */
#endif
	unsigned int vs_readers;	/* Reads in progress */
	unsigned int vs_writers;	/* Writes in progress */

	unsigned int vs_errors;		/* Pageout error count */

	int vs_clshift;			/* Bit shift: clusters to pages */
	int vs_size;			/* Object size in clusters */
	int vs_indirect:1,		/* Is the map indirect ? */
	    vs_xfer_pending:1;		/* xfering out of a seg ? */
	int vs_async_pending;		/* Count of pending async writes */
#ifdef MACH_KERNEL
	mutex_t vs_map_lock;		/* to protect map below */
#else
	struct mutex vs_map_lock;	/* to protect map below */
#endif
	/* Direct vs. indirect form selected by vs_indirect. */
	union {
		struct vs_map *vsu_dmap;	/* Direct map of clusters */
		struct vs_map **vsu_imap;	/* Indirect map of clusters */
	} vs_un;
} *vstruct_t;

#define vs_dmap vs_un.vsu_dmap
#define vs_imap vs_un.vsu_imap
#define MEM_OBJ_CTL(vs) ((vs)->vs_control_port)

#define VSTRUCT_NULL ((vstruct_t) 0)
603
/*
 * vstruct and map locking.  In-kernel the structure lock is a
 * spinning hw_lock; user-side it is a mutex.
 */
#ifdef MACH_KERNEL
#define DPT_LOCK_INIT(lock) mutex_init(&(lock), ETAP_DPAGE_VSTRUCT)
#define DPT_LOCK(lock) mutex_lock(&(lock))
#define DPT_UNLOCK(lock) mutex_unlock(&(lock))
#define VS_LOCK_INIT(vs) hw_lock_init(&(vs)->vs_lock)
/*
 * NOTE(review): the kernel VS_TRY_LOCK takes the lock
 * unconditionally and yields TRUE — it can block, unlike the
 * user-level mutex_try_lock variant.  Callers must not rely on
 * it being non-blocking.
 */
#define VS_TRY_LOCK(vs) (VS_LOCK(vs),TRUE)
#define VS_LOCK(vs) hw_lock_lock(&(vs)->vs_lock)
#define VS_UNLOCK(vs) hw_lock_unlock(&(vs)->vs_lock)
#else
#define VS_LOCK_INIT(vs) mutex_init(&(vs)->vs_lock, ETAP_DPAGE_VSTRUCT)
#define VS_TRY_LOCK(vs) mutex_try_lock(&(vs)->vs_lock)
#define VS_LOCK(vs) mutex_lock(&(vs)->vs_lock)
#define VS_UNLOCK(vs) mutex_unlock(&(vs)->vs_lock)
#endif

#ifdef MACH_KERNEL
#define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock, ETAP_DPAGE_VSMAP)
#else
#define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock)
#endif
#define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
#ifndef MACH_KERNEL
#define VS_MAP_TRY_LOCK(vs) mutex_try_lock(&(vs)->vs_map_lock)
#else
#define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
#endif
#define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
631
/*
 * Data structures and variables dealing with asynchronous
 * completion of paging operations.
 */
/*
 * vs_async
 * A structure passed to ps_write_device for asynchronous completions.
 * It contains enough information to complete the write and
 * inform the VM of its completion.
 */
struct vs_async {
	struct vs_async *vsa_next;	/* pointer to next structure */
	vstruct_t vsa_vs;		/* the vstruct for the object */
	vm_offset_t vsa_addr;		/* the vaddr of the data moved */
	vm_offset_t vsa_offset;		/* the object offset of the data */
	vm_size_t vsa_size;		/* the number of bytes moved */
	paging_segment_t vsa_ps;	/* the paging segment used */
	int vsa_flags;			/* flags (VSA_* below) */
	int vsa_error;			/* error, if there is one */
	mutex_t vsa_lock;		/* protects this structure */
	MACH_PORT_FACE reply_port;	/* associated reply port */
};

/*
 * flags values.
 */
#define VSA_READ 0x0001
#define VSA_WRITE 0x0002
#define VSA_TRANSFER 0x0004
661
/*
 * List of all vstructs. A specific vstruct is
 * found directly via its port, this list is
 * only used for monitoring purposes by the
 * default_pager_object* calls
 */
struct vstruct_list_head {
	queue_head_t vsl_queue;		/* all live vstructs */
#ifdef MACH_KERNEL
	mutex_t vsl_lock;
#else
	struct mutex vsl_lock;
#endif
	int vsl_count;			/* saves code */
	queue_head_t vsl_leak_queue;	/* vstructs parked rather than freed — TODO confirm */
};
extern struct vstruct_list_head vstruct_list;

#ifdef MACH_KERNEL
#define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock, ETAP_DPAGE_VSLIST)
#else
#define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock)
#endif
#define VSL_LOCK() mutex_lock(&vstruct_list.vsl_lock)
#define VSL_LOCK_TRY() mutex_try(&vstruct_list.vsl_lock)
#define VSL_UNLOCK() mutex_unlock(&vstruct_list.vsl_lock)
688
689 /*
690 * Create port alias for vstruct address.
691 *
692 * We assume that the last two bits of a vstruct address will be zero due to
693 * memory allocation restrictions, hence are available for use as a sanity
694 * check.
695 */
/*
 * Kernel side: the port's alias field points at a vstruct_alias
 * whose "name" carries the ISVS magic.  User side: the vstruct
 * address itself is tagged in the low bits (assumed zero from
 * allocation alignment) to form the port name.
 */
#ifdef MACH_KERNEL
#define ISVS 123456	/* magic stored in vstruct_alias.name */
#define port_is_vs(_port_) \
	((((struct vstruct_alias *)((_port_)->alias)) != NULL) && \
	(((struct vstruct_alias *)(_port_)->alias)->name==ISVS))
#define port_to_vs(_port_) \
	((struct vstruct_alias *)(_port_)->alias)->vs
#define vs_to_port(_vs_) (_vs_->vs_mem_obj_port)
/* Resolve "_port_" to its vstruct in "_vs_"; panics on a bad port. */
#define vs_lookup(_port_, _vs_) \
	do { \
		if ((((struct vstruct_alias *)(_port_)->alias) == NULL) || \
			(((struct vstruct_alias *)(_port_)->alias)->name!=ISVS)) \
			panic("bad pager port"); \
		_vs_ = port_to_vs(_port_); \
	} while (0)
#else
/* Tag bit 0 marks a vstruct-derived port name; mask restores the address. */
#define vs_to_port(_vs_) (((vm_offset_t)(_vs_))+1)
#define port_to_vs(_port_) ((vstruct_t)(((vm_offset_t)(_port_))&~3))
#define port_is_vs(_port_) ((((vm_offset_t)(_port_))&3) == 1)

/* Resolve "_port_" to its vstruct in "_vs_"; Panics on a bad port. */
#define vs_lookup(_port_, _vs_) \
	do { \
		if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_) \
			|| port_to_vs(_port_)->vs_mem_obj_port != (_port_)) \
			Panic("bad pager port"); \
		_vs_ = port_to_vs(_port_); \
	} while (0)
#endif
724
/*
 * Cross-module routines declaration.
 */
#ifndef MACH_KERNEL
extern int dp_thread_id(void);
#endif
extern boolean_t device_reply_server(mach_msg_header_t *,
				mach_msg_header_t *);
/* No-senders notification: the pager object has lost all clients. */
#ifdef MACH_KERNEL
extern void default_pager_no_senders(MACH_PORT_FACE,
				mach_port_seqno_t,
				mach_port_mscount_t);
#else
extern void default_pager_no_senders(memory_object_t,
				mach_port_seqno_t,
				mach_port_mscount_t);
#endif
extern int local_log2(unsigned int);

/* Backing store setup and global accounting. */
extern void bs_initialize(void);
extern void bs_global_info(vm_size_t *,
			vm_size_t *);
extern boolean_t bs_add_device(char *,
			MACH_PORT_FACE);

/* vstruct lifecycle and cluster I/O. */
extern vstruct_t ps_vstruct_create(vm_size_t);
extern void ps_vstruct_dealloc(vstruct_t);
extern kern_return_t pvs_cluster_read(vstruct_t,
				vm_offset_t,
				vm_size_t);
extern kern_return_t vs_cluster_write(vstruct_t,
				upl_t,
				vm_offset_t,
				vm_size_t,
				boolean_t,
				int);
extern vm_offset_t ps_clmap(vstruct_t,
			vm_offset_t,
			struct clmap *,
			int,
			vm_size_t,
			int);
extern vm_size_t ps_vstruct_allocated_size(vstruct_t);
extern size_t ps_vstruct_allocated_pages(vstruct_t,
				default_pager_page_t *,
				size_t);
extern boolean_t bs_set_default_clsize(unsigned int);

extern boolean_t verbose;

#endif /* _DEFAULT_PAGER_INTERNAL_H_ */
772
773 #endif /* _DEFAULT_PAGER_INTERNAL_H_ */