/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * General definitions.
 */
62 #ifndef _DEFAULT_PAGER_INTERNAL_H_
63 #define _DEFAULT_PAGER_INTERNAL_H_
65 #include <default_pager/diag.h>
66 #include <default_pager/default_pager_types.h>
67 #include <mach/mach_types.h>
68 #include <ipc/ipc_port.h>
69 #include <ipc/ipc_types.h>
70 #include <ipc/ipc_space.h>
71 #include <kern/lock.h>
72 #include <kern/kalloc.h>
73 #include <kern/thread.h>
74 #include <vm/vm_kern.h>
75 #include <device/device_types.h>
78 * Default option settings.
88 #define MACH_PORT_FACE mach_port_t
92 #define USE_PRECIOUS TRUE
97 #define UP(stuff) stuff
98 #else /* USER_PAGER */
100 #endif /* USER_PAGER */
103 extern struct mutex dprintf_lock
;
104 #define PRINTF_LOCK_INIT() mutex_init(&dprintf_lock)
105 #define PRINTF_LOCK() mutex_lock(&dprintf_lock)
106 #define PRINTF_UNLOCK() mutex_unlock(&dprintf_lock)
110 #define dprintf(args) \
113 printf("%s[%d]: ", my_name, dp_thread_id()); \
118 #define dprintf(args) \
120 printf("%s[KERNEL]: ", my_name); \
128 __private_extern__
char my_name
[];
130 #define DEFAULT_PAGER_DEBUG 0
132 #if DEFAULT_PAGER_DEBUG
134 extern int debug_mask
;
135 #define DEBUG_MSG_EXTERNAL 0x00000001
136 #define DEBUG_MSG_INTERNAL 0x00000002
137 #define DEBUG_MO_EXTERNAL 0x00000100
138 #define DEBUG_MO_INTERNAL 0x00000200
139 #define DEBUG_VS_EXTERNAL 0x00010000
140 #define DEBUG_VS_INTERNAL 0x00020000
141 #define DEBUG_BS_EXTERNAL 0x01000000
142 #define DEBUG_BS_INTERNAL 0x02000000
144 #define DP_DEBUG(level, args) \
146 if (debug_mask & (level)) \
150 #define ASSERT(expr) \
154 panic("%s[%d]%s: assertion failed in %s line %d: %s",\
155 my_name
, dp_thread_id(), here
, \
156 __FILE__
, __LINE__
, # expr); \
158 panic("%s[KERNEL]: assertion failed in %s line %d: %s",\
159 my_name
, __FILE__
, __LINE__
, # expr); \
163 #else /* DEFAULT_PAGER_DEBUG */
165 #define DP_DEBUG(level, args)
166 #define ASSERT(clause)
168 #endif /* DEFAULT_PAGER_DEBUG */
171 extern char *mach_error_string(kern_return_t
);
/*
 * MIN: smaller of two values.  Each argument may be evaluated more
 * than once, so callers must not pass expressions with side effects.
 * Guarded so we do not clash with the identical definition commonly
 * provided by sys/param.h and other system headers.
 */
#ifndef MIN
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#endif
176 #define PAGER_SUCCESS 0
178 #define PAGER_ERROR 2
181 * VM and IPC globals.
184 #define vm_page_size page_size
186 extern vm_object_size_t vm_page_size
;
188 extern unsigned long long vm_page_mask
;
189 extern int vm_page_shift
;
192 #define ptoa(p) ((p)*vm_page_size)
193 #define atop(a) ((a)/vm_page_size)
/*
 * howmany: number of size-b chunks needed to hold a units, i.e. the
 * ceiling of a/b for non-negative a and positive b.  Both arguments
 * are evaluated more than once; avoid side effects.  Guarded against
 * the identical definition commonly provided by sys/param.h.
 */
#ifndef howmany
#define howmany(a,b) (((a) + (b) - 1)/(b))
#endif
197 extern memory_object_default_t default_pager_object
;
200 extern mutex_t dpt_lock
; /* Lock for the dpt array */
201 extern int default_pager_internal_count
;
202 extern MACH_PORT_FACE default_pager_host_port
;
203 /* extern task_t default_pager_self; */ /* dont need or want */
204 extern MACH_PORT_FACE default_pager_internal_set
;
205 extern MACH_PORT_FACE default_pager_external_set
;
206 extern MACH_PORT_FACE default_pager_default_set
;
208 extern mach_port_t default_pager_host_port
;
209 extern task_port_t default_pager_self
;
210 extern mach_port_t default_pager_internal_set
;
211 extern mach_port_t default_pager_external_set
;
212 extern mach_port_t default_pager_default_set
;
/*
 * Per-thread state for a default pager service thread; the set of live
 * threads is reachable through dpt_array (declared later in this file),
 * which dpt_lock protects.
 * NOTE(review): the stray leading numerals below are extraction
 * artifacts of the original file's line numbering, and some
 * conditionally-compiled lines appear to have been dropped — restore
 * against the pristine header before building.
 */
215 typedef struct default_pager_thread
{
217 cthread_t dpt_thread
; /* Server thread. */
219 vm_offset_t dpt_buffer
; /* Read buffer. */
220 boolean_t dpt_internal
; /* Do we handle internal objects? */
222 int dpt_id
; /* thread id for printf */
226 boolean_t dpt_initialized_p
; /* Thread is ready for requests. */
227 } default_pager_thread_t
;
230 extern default_pager_thread_t
**dpt_array
;
237 unsigned int gs_pageout_calls
; /* # pageout calls */
238 unsigned int gs_pagein_calls
; /* # pagein calls */
239 unsigned int gs_pages_in
; /* # pages paged in (total) */
240 unsigned int gs_pages_out
; /* # pages paged out (total) */
241 unsigned int gs_pages_unavail
; /* # zero-fill pages */
242 unsigned int gs_pages_init
; /* # page init requests */
243 unsigned int gs_pages_init_writes
; /* # page init writes */
244 VSTATS_LOCK_DECL(gs_lock
)
246 #define GSTAT(clause) VSTATS_ACTION(&global_stats.gs_lock, (clause))
249 * Cluster related definitions.
250 * Clusters are sized in number of pages per cluster.
251 * Cluster sizes must be powers of two.
253 * These numbers are related to the struct vs_map,
256 #define MAX_CLUSTER_SIZE 8
257 #define MAX_CLUSTER_SHIFT 3
/*
 * Bit map manipulation macros.  The bit array lives in a byte (char)
 * buffer: bit i is stored in byte i/NBBY at bit position i%NBBY.
 * The index argument is evaluated more than once, so callers must not
 * pass expressions with side effects.
 */
#ifndef NBBY
#define NBBY	8		/* bits per byte; sys/param.h may also define this */
#endif
#define BYTEMASK	0xff
#define setbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))
#define clrbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))
#define isset(a,i)	(*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))
#define isclr(a,i)	((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)
272 * Backing Store Management.
278 #define BS_FULLPRI -2
281 * Mapping between backing store port and backing store object.
283 struct backing_store
{
284 queue_chain_t bs_links
; /* link in backing_store_list */
286 mutex_t bs_lock
; /* lock for the structure */
288 struct mutex bs_lock
; /* lock for the structure */
290 MACH_PORT_FACE bs_port
; /* backing store port */
292 int bs_clsize
; /* cluster size in pages */
295 unsigned int bs_pages_free
; /* # unallocated pages */
296 unsigned int bs_pages_total
; /* # pages (total) */
297 unsigned int bs_pages_in
; /* # page read requests */
298 unsigned int bs_pages_in_fail
; /* # page read errors */
299 unsigned int bs_pages_out
; /* # page write requests */
300 unsigned int bs_pages_out_fail
; /* # page write errors */
302 typedef struct backing_store
*backing_store_t
;
303 #define BACKING_STORE_NULL ((backing_store_t) 0)
304 #define BS_STAT(bs, clause) VSTATS_ACTION(&(bs)->bs_lock, (clause))
307 #define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock, 0)
309 #define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock)
311 #define BS_LOCK(bs) mutex_lock(&(bs)->bs_lock)
312 #define BS_UNLOCK(bs) mutex_unlock(&(bs)->bs_lock)
314 struct backing_store_list_head
{
315 queue_head_t bsl_queue
;
319 struct mutex bsl_lock
;
322 extern struct backing_store_list_head backing_store_list
;
323 extern int backing_store_release_trigger_disable
;
326 #define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock, 0)
328 #define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock)
330 #define BSL_LOCK() mutex_lock(&backing_store_list.bsl_lock)
331 #define BSL_UNLOCK() mutex_unlock(&backing_store_list.bsl_lock)
334 * Paging segment management.
335 * Controls allocation of blocks within paging area.
337 struct paging_segment
{
338 /* device management */
340 MACH_PORT_FACE dev
; /* Port to device */
341 struct vnode
*vnode
; /* vnode for bs file */
343 unsigned int ps_segtype
; /* file type or partition */
344 MACH_PORT_FACE ps_device
; /* Port to device */
345 vm_offset_t ps_offset
; /* Offset of segment within device */
346 vm_offset_t ps_recnum
; /* Number of device records in segment*/
347 unsigned int ps_pgnum
; /* Number of pages in segment */
348 unsigned int ps_record_shift
;/* Bit shift: pages to device records */
350 /* clusters and pages */
351 unsigned int ps_clshift
; /* Bit shift: clusters to pages */
352 unsigned int ps_ncls
; /* Number of clusters in segment */
353 unsigned int ps_clcount
; /* Number of free clusters */
354 unsigned int ps_pgcount
; /* Number of free pages */
355 unsigned long ps_hint
; /* Hint of where to look next. */
359 mutex_t ps_lock
; /* Lock for contents of struct */
361 struct mutex ps_lock
; /* Lock for contents of struct */
363 unsigned char *ps_bmap
; /* Map of used clusters */
366 backing_store_t ps_bs
; /* Backing store segment belongs to */
368 boolean_t ps_going_away
; /* Destroy attempt in progress */
371 #define ps_vnode storage_type.vnode
372 #define ps_device storage_type.dev
373 #define PS_PARTITION 1
376 typedef struct paging_segment
*paging_segment_t
;
378 #define PAGING_SEGMENT_NULL ((paging_segment_t) 0)
381 #define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock, 0)
383 #define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock)
385 #define PS_LOCK(ps) mutex_lock(&(ps)->ps_lock)
386 #define PS_UNLOCK(ps) mutex_unlock(&(ps)->ps_lock)
/*
 * Index of a paging segment within the global paging_segments[] array.
 */
typedef unsigned int pseg_index_t;

#define INVALID_PSEG_INDEX	((pseg_index_t)-1)	/* no such segment */
#define NULL_PSEG_INDEX		((pseg_index_t) 0)	/* empty map entry */

/*
 * MAX_PSEG_INDEX value is related to struct vs_map below.
 * "0" is reserved for empty map entries (no segment).
 */
#define MAX_PSEG_INDEX		63	/* 0 is reserved for empty map */
#define MAX_NUM_PAGING_SEGMENTS	MAX_PSEG_INDEX
399 /* paging segments array */
400 extern paging_segment_t paging_segments
[MAX_NUM_PAGING_SEGMENTS
];
402 extern mutex_t paging_segments_lock
;
404 extern struct mutex paging_segments_lock
;
406 extern int paging_segment_count
; /* number of active paging segments */
407 extern int paging_segment_max
; /* highest used paging segment index */
408 extern int ps_select_array
[DEFAULT_PAGER_BACKING_STORE_MAXPRI
+1];
411 #define PSL_LOCK_INIT() mutex_init(&paging_segments_lock, 0)
413 #define PSL_LOCK_INIT() mutex_init(&paging_segments_lock)
415 #define PSL_LOCK() mutex_lock(&paging_segments_lock)
416 #define PSL_UNLOCK() mutex_unlock(&paging_segments_lock)
/*
 * Vstruct manipulation.  The vstruct is the pager's internal
 * representation of vm objects it manages.  There is one vstruct allocated
 * per object it manages.
 *
 * The following data structures are defined for vstruct and vm object
 * management.
 */

/*
 * A structure used only for temporary objects.  It is the element
 * contained in the vs_clmap structure, which contains information
 * about which clusters and pages in an object are present on backing
 * store (a paging file).
 * Note that this structure and its associated constants may change
 * with minimal impact on code.  The only function which knows the
 * internals of this structure is ps_clmap().
 *
 * If it is necessary to change the maximum number of paging segments
 * or pages in a cluster, then this structure is the one most
 * affected.  The constants and structures which *may* change are:
 *	MAX_NUM_PAGING_SEGMENTS
 *	VSTRUCT_DEF_CLSHIFT
 *	struct vs_map and associated macros and constants (VSM_*)
 *	  (only the macro definitions need change, the exported (inside the
 *	  pager only) interfaces remain the same; the constants are for
 *	  internal vs_map manipulation only).
 *	struct clbmap (below).
 */
451 unsigned int vsmap_entry
:23, /* offset in paging segment */
452 vsmap_psindex
:8, /* paging segment */
458 typedef struct vs_map
*vs_map_t
;
461 #define VSM_ENTRY_NULL 0x7fffff
/*
 * Exported macros for manipulating the vs_map structure --
 * checking status, getting and setting bits.
 */
467 #define VSCLSIZE(vs) (1UL << (vs)->vs_clshift)
468 #define VSM_ISCLR(vsm) (((vsm).vsmap_entry == VSM_ENTRY_NULL) && \
469 ((vsm).vsmap_error == 0))
470 #define VSM_ISERR(vsm) ((vsm).vsmap_error)
471 #define VSM_SETCLOFF(vsm, val) ((vsm).vsmap_entry = (val))
472 #define VSM_SETERR(vsm, err) ((vsm).vsmap_error = 1, \
473 (vsm).vsmap_entry = (err))
474 #define VSM_GETERR(vsm) ((vsm).vsmap_entry)
475 #define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page)))
476 #define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page)))
477 #define VSM_SETPS(vsm, psindx) ((vsm).vsmap_psindex = (psindx))
478 #define VSM_PSINDEX(vsm) ((vsm).vsmap_psindex)
479 #define VSM_PS(vsm) paging_segments[(vsm).vsmap_psindex]
480 #define VSM_BMAP(vsm) ((vsm).vsmap_bmap)
481 #define VSM_CLOFF(vsm) ((vsm).vsmap_entry)
482 #define VSM_CLR(vsm) ((vsm).vsmap_entry = VSM_ENTRY_NULL, \
483 (vsm).vsmap_psindex = 0, \
484 (vsm).vsmap_error = 0, \
485 (vsm).vsmap_bmap = 0, \
486 (vsm).vsmap_alloc = 0)
487 #define VSM_ALLOC(vsm) ((vsm).vsmap_alloc)
488 #define VSM_SETALLOC(vsm, page) ((vsm).vsmap_alloc |= (1 << (page)))
489 #define VSM_CLRALLOC(vsm, page) ((vsm).vsmap_alloc &= ~(1 << (page)))
492 * Constants and macros for dealing with vstruct maps,
493 * which comprise vs_map structures, which
494 * map vm objects to backing storage (paging files and clusters).
496 #define CLMAP_THRESHOLD 512 /* bytes */
497 #define CLMAP_ENTRIES (CLMAP_THRESHOLD/sizeof(struct vs_map))
498 #define CLMAP_SIZE(ncls) (ncls*sizeof(struct vs_map))
500 #define INDIRECT_CLMAP_ENTRIES(ncls) (((ncls-1)/CLMAP_ENTRIES) + 1)
501 #define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * sizeof(struct vs_map *))
502 #define INDIRECT_CLMAP(size) (CLMAP_SIZE(size) > CLMAP_THRESHOLD)
504 #define RMAPSIZE(blocks) (howmany(blocks,NBBY))
512 * A cluster map returned by ps_clmap. It is an abstracted cluster of
513 * pages. It gives the caller information about the cluster
514 * desired. On read it tells the caller if a cluster is mapped, and if so,
515 * which of its pages are valid. It should not be referenced directly,
516 * except by ps_clmap; macros should be used. If the number of pages
517 * in a cluster needs to be more than 32, then the struct clbmap must
521 unsigned int clb_map
;
525 paging_segment_t cl_ps
; /* paging segment backing cluster */
526 int cl_numpages
; /* number of valid pages */
527 struct clbmap cl_bmap
; /* map of pages in cluster */
528 int cl_error
; /* cluster error value */
529 struct clbmap cl_alloc
; /* map of allocated pages in cluster */
532 #define CLMAP_ERROR(clm) (clm).cl_error
533 #define CLMAP_PS(clm) (clm).cl_ps
534 #define CLMAP_NPGS(clm) (clm).cl_numpages
535 #define CLMAP_ISSET(clm,i) ((1<<(i))&((clm).cl_bmap.clb_map))
536 #define CLMAP_ALLOC(clm) (clm).cl_alloc.clb_map
538 * Shift off unused bits in a partial cluster
540 #define CLMAP_SHIFT(clm,vs) \
541 (clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
542 #define CLMAP_SHIFTALLOC(clm,vs) \
543 (clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
545 typedef struct vstruct_alias
{
551 #define DPT_LOCK_INIT(lock) mutex_init(&(lock), 0)
552 #define DPT_LOCK(lock) mutex_lock(&(lock))
553 #define DPT_UNLOCK(lock) mutex_unlock(&(lock))
554 #define DPT_SLEEP(lock, e, i) thread_sleep_mutex(&(lock), (event_t)(e), i)
555 #define VS_LOCK_TYPE hw_lock_data_t
556 #define VS_LOCK_INIT(vs) hw_lock_init(&(vs)->vs_lock)
557 #define VS_TRY_LOCK(vs) (VS_LOCK(vs),TRUE)
558 #define VS_LOCK(vs) hw_lock_lock(&(vs)->vs_lock)
559 #define VS_UNLOCK(vs) hw_lock_unlock(&(vs)->vs_lock)
560 #define VS_MAP_LOCK_TYPE mutex_t
561 #define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock, 0)
562 #define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
563 #define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
564 #define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
566 #define VS_LOCK_TYPE struct mutex
567 #define VS_LOCK_INIT(vs) mutex_init(&(vs)->vs_lock, 0)
568 #define VS_TRY_LOCK(vs) mutex_try(&(vs)->vs_lock)
569 #define VS_LOCK(vs) mutex_lock(&(vs)->vs_lock)
570 #define VS_UNLOCK(vs) mutex_unlock(&(vs)->vs_lock)
571 #define VS_MAP_LOCK_TYPE struct mutex
572 #define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock)
573 #define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock)
574 #define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock)
575 #define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock)
580 * VM Object Structure: This is the structure used to manage
581 * default pager object associations with their control counter-
582 * parts (VM objects).
584 typedef struct vstruct
{
585 int *vs_mem_obj
; /* our memory obj - temp */
586 int vs_mem_obj_ikot
;/* JMM:fake ip_kotype() */
587 memory_object_control_t vs_control
; /* our mem obj control ref */
588 VS_LOCK_TYPE vs_lock
; /* data for the lock */
590 /* JMM - Could combine these first two in a single pending count now */
591 unsigned int vs_next_seqno
; /* next sequence num to issue */
592 unsigned int vs_seqno
; /* Pager port sequence number */
593 unsigned int vs_readers
; /* Reads in progress */
594 unsigned int vs_writers
; /* Writes in progress */
598 /* boolean_t */ vs_waiting_seqno
:1, /* to wait on seqno */
599 /* boolean_t */ vs_waiting_read
:1, /* waiting on reader? */
600 /* boolean_t */ vs_waiting_write
:1, /* waiting on writer? */
601 /* boolean_t */ vs_waiting_async
:1, /* waiting on async? */
602 /* boolean_t */ vs_indirect
:1, /* map indirect? */
603 /* boolean_t */ vs_xfer_pending
:1; /* xfer out of seg? */
605 event_t vs_waiting_seqno
;/* to wait on seqno */
606 event_t vs_waiting_read
; /* to wait on readers */
607 event_t vs_waiting_write
;/* to wait on writers */
608 event_t vs_waiting_async
;/* to wait on async_pending */
609 int vs_indirect
:1, /* Is the map indirect ? */
610 vs_xfer_pending
:1; /* xfering out of a seg ? */
613 unsigned int vs_async_pending
;/* pending async write count */
614 unsigned int vs_errors
; /* Pageout error count */
615 unsigned int vs_references
; /* references */
617 queue_chain_t vs_links
; /* Link in pager-wide list */
619 unsigned int vs_clshift
; /* Bit shift: clusters->pages */
620 unsigned int vs_size
; /* Object size in clusters */
622 mutex_t vs_map_lock
; /* to protect map below */
624 struct mutex vs_map_lock
; /* to protect map below */
627 struct vs_map
*vsu_dmap
; /* Direct map of clusters */
628 struct vs_map
**vsu_imap
; /* Indirect map of clusters */
632 #define vs_dmap vs_un.vsu_dmap
633 #define vs_imap vs_un.vsu_imap
635 #define VSTRUCT_NULL ((vstruct_t) 0)
637 __private_extern__
void vs_async_wait(vstruct_t
);
640 __private_extern__
void vs_lock(vstruct_t
);
641 __private_extern__
void vs_unlock(vstruct_t
);
642 __private_extern__
void vs_start_read(vstruct_t
);
643 __private_extern__
void vs_finish_read(vstruct_t
);
644 __private_extern__
void vs_wait_for_readers(vstruct_t
);
645 __private_extern__
void vs_start_write(vstruct_t
);
646 __private_extern__
void vs_finish_write(vstruct_t
);
647 __private_extern__
void vs_wait_for_writers(vstruct_t
);
648 __private_extern__
void vs_wait_for_sync_writers(vstruct_t
);
651 #define vs_unlock(vs)
652 #define vs_start_read(vs)
653 #define vs_wait_for_readers(vs)
654 #define vs_finish_read(vs)
655 #define vs_start_write(vs)
656 #define vs_wait_for_writers(vs)
657 #define vs_wait_for_sync_writers(vs)
658 #define vs_finish_write(vs)
659 #endif /* PARALLEL */
662 * Data structures and variables dealing with asynchronous
663 * completion of paging operations.
667 * A structure passed to ps_write_device for asynchronous completions.
668 * It contains enough information to complete the write and
669 * inform the VM of its completion.
672 struct vs_async
*vsa_next
; /* pointer to next structure */
673 vstruct_t vsa_vs
; /* the vstruct for the object */
674 vm_offset_t vsa_addr
; /* the vaddr of the data moved */
675 vm_offset_t vsa_offset
; /* the object offset of the data */
676 vm_size_t vsa_size
; /* the number of bytes moved */
677 paging_segment_t vsa_ps
; /* the paging segment used */
678 int vsa_flags
; /* flags */
679 int vsa_error
; /* error, if there is one */
681 MACH_PORT_FACE reply_port
; /* associated reply port */
687 #define VSA_READ 0x0001
688 #define VSA_WRITE 0x0002
689 #define VSA_TRANSFER 0x0004
692 * List of all vstructs. A specific vstruct is
693 * found directly via its port, this list is
694 * only used for monitoring purposes by the
695 * default_pager_object* calls
697 struct vstruct_list_head
{
698 queue_head_t vsl_queue
;
702 struct mutex vsl_lock
;
704 int vsl_count
; /* saves code */
707 __private_extern__
struct vstruct_list_head vstruct_list
;
709 __private_extern__
void vstruct_list_insert(vstruct_t vs
);
710 __private_extern__
void vstruct_list_delete(vstruct_t vs
);
714 #define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock, 0)
716 #define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock)
718 #define VSL_LOCK() mutex_lock(&vstruct_list.vsl_lock)
719 #define VSL_LOCK_TRY() mutex_try(&vstruct_list.vsl_lock)
720 #define VSL_UNLOCK() mutex_unlock(&vstruct_list.vsl_lock)
721 #define VSL_SLEEP(e,i) thread_sleep_mutex((e), &vstruct_list.vsl_lock, (i))
724 __private_extern__ zone_t vstruct_zone
;
728 * Create port alias for vstruct address.
730 * We assume that the last two bits of a vstruct address will be zero due to
731 * memory allocation restrictions, hence are available for use as a sanity
736 #define ISVS ((int *)123456)
737 #define mem_obj_is_vs(_mem_obj_) \
738 (((_mem_obj_) != NULL) && ((_mem_obj_)->pager == ISVS))
739 #define mem_obj_to_vs(_mem_obj_) \
740 ((vstruct_t)(_mem_obj_))
741 #define vs_to_mem_obj(_vs_) ((memory_object_t)(_vs_))
742 #define vs_lookup(_mem_obj_, _vs_) \
744 if (!mem_obj_is_vs(_mem_obj_)) \
745 panic("bad dp memory object"); \
746 _vs_ = mem_obj_to_vs(_mem_obj_); \
748 #define vs_lookup_safe(_mem_obj_, _vs_) \
750 if (!mem_obj_is_vs(_mem_obj_)) \
751 _vs_ = VSTRUCT_NULL; \
753 _vs_ = mem_obj_to_vs(_mem_obj_); \
757 #define vs_to_port(_vs_) (((vm_offset_t)(_vs_))+1)
758 #define port_to_vs(_port_) ((vstruct_t)(((vm_offset_t)(_port_))&~3))
759 #define port_is_vs(_port_) ((((vm_offset_t)(_port_))&3) == 1)
761 #define vs_lookup(_port_, _vs_) \
763 if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_) \
764 || port_to_vs(_port_)->vs_mem_obj != (_port_)) \
765 Panic("bad pager port"); \
766 _vs_ = port_to_vs(_port_); \
771 * Cross-module routines declaration.
774 extern int dp_thread_id(void);
776 extern boolean_t
device_reply_server(mach_msg_header_t
*,
777 mach_msg_header_t
*);
779 extern boolean_t
default_pager_no_senders(memory_object_t
,
780 mach_port_mscount_t
);
782 extern void default_pager_no_senders(memory_object_t
,
784 mach_port_mscount_t
);
787 extern int local_log2(unsigned int);
788 extern void bs_initialize(void);
789 extern void bs_global_info(vm_size_t
*,
791 extern boolean_t
bs_add_device(char *,
793 extern vstruct_t
ps_vstruct_create(vm_size_t
);
794 extern void ps_vstruct_dealloc(vstruct_t
);
795 extern kern_return_t
pvs_cluster_read(vstruct_t
,
798 extern kern_return_t
vs_cluster_write(vstruct_t
,
804 extern vm_offset_t
ps_clmap(vstruct_t
,
810 extern vm_size_t
ps_vstruct_allocated_size(vstruct_t
);
811 extern size_t ps_vstruct_allocated_pages(vstruct_t
,
812 default_pager_page_t
*,
814 extern boolean_t
bs_set_default_clsize(unsigned int);
816 extern boolean_t verbose
;
818 extern thread_call_t default_pager_backing_store_monitor_callout
;
819 extern void default_pager_backing_store_monitor(thread_call_param_t
, thread_call_param_t
);
821 extern ipc_port_t max_pages_trigger_port
;
822 extern unsigned int dp_pages_free
;
823 extern unsigned int maximum_pages_free
;
825 /* Do we know yet if swap files need to be encrypted ? */
826 extern boolean_t dp_encryption_inited
;
827 /* Should we encrypt data before writing to swap ? */
828 extern boolean_t dp_encryption
;
830 #endif /* _DEFAULT_PAGER_INTERNAL_H_ */