osfmk/vm/vm_fourk_pager.c (apple/xnu, xnu-3789.70.16)
1/*
2 * Copyright (c) 2014 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/errno.h>
30
31#include <mach/mach_types.h>
32#include <mach/mach_traps.h>
33#include <mach/host_priv.h>
34#include <mach/kern_return.h>
35#include <mach/memory_object_control.h>
36#include <mach/memory_object_types.h>
37#include <mach/port.h>
38#include <mach/policy.h>
39#include <mach/upl.h>
40#include <mach/thread_act.h>
41#include <mach/mach_vm.h>
42
43#include <kern/host.h>
44#include <kern/kalloc.h>
45#include <kern/page_decrypt.h>
46#include <kern/queue.h>
47#include <kern/thread.h>
 48#include <kern/ipc_kobject.h>
49
50#include <ipc/ipc_port.h>
51#include <ipc/ipc_space.h>
52
53#include <vm/vm_fault.h>
54#include <vm/vm_map.h>
55#include <vm/vm_pageout.h>
56#include <vm/memory_object.h>
57#include <vm/vm_pageout.h>
58#include <vm/vm_protos.h>
 59#include <vm/vm_kern.h>
60
61
62/*
63 * 4K MEMORY PAGER
64 *
65 * This external memory manager (EMM) handles memory mappings that are
66 * 4K-aligned but not page-aligned and can therefore not be mapped directly.
67 *
68 * It mostly handles page-in requests (from memory_object_data_request()) by
69 * getting the data needed to fill in each 4K-chunk. That can require
 70 * getting data from one or two pages of its backing VM object
 71 * (a file or an "apple-protected" pager backed by an encrypted file), and
 72 * copying the data to another page so that it is aligned as expected by
73 * the mapping.
74 *
75 * Returned pages can never be dirtied and must always be mapped copy-on-write,
76 * so the memory manager does not need to handle page-out requests (from
77 * memory_object_data_return()).
78 *
79 */
80
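/*
 * Illustrative sketch (hypothetical helper, not part of this file): roughly
 * how a client of this pager -- in XNU, the vm_map code that sets up
 * 4K-aligned mappings on systems with a larger VM page size -- would use
 * fourk_pager_create() and fourk_pager_populate(), both defined below.
 * Only the pager calls and the FOURK_* constants come from this file; the
 * helper name, its arguments, and the reference-counting convention shown
 * here are assumptions.
 */
#if 0	/* example only */
static memory_object_t
fourk_pager_example_setup(
	vm_object_t		backing_object,	/* e.g. the file's VM object */
	vm_object_offset_t	backing_offset)	/* 4K-aligned backing offset */
{
	memory_object_t		fourk_mem_obj;
	vm_object_t		old_object;
	vm_object_offset_t	old_offset;
	kern_return_t		kr;
	int			slot;

	/* one pager covers one 16K page, i.e. FOURK_PAGER_SLOTS 4K chunks */
	fourk_mem_obj = fourk_pager_create();
	if (fourk_mem_obj == MEMORY_OBJECT_NULL) {
		return MEMORY_OBJECT_NULL;
	}

	/*
	 * Point each 4K slot at its backing object and offset.  A populated
	 * slot holds a reference on its backing object, released later in
	 * fourk_pager_terminate_internal(), so take one per slot here.
	 */
	for (slot = 0; slot < FOURK_PAGER_SLOTS; slot++) {
		vm_object_reference(backing_object);
		kr = fourk_pager_populate(fourk_mem_obj,
					  FALSE,	/* do not overwrite */
					  slot,
					  backing_object,
					  backing_offset + slot * FOURK_PAGE_SIZE,
					  &old_object,
					  &old_offset);
		assert(kr == KERN_SUCCESS);
	}

	/* the memory object can now be mapped; mappings are copy-on-write */
	return fourk_mem_obj;
}
#endif	/* example only */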
81/* forward declarations */
82void fourk_pager_reference(memory_object_t mem_obj);
83void fourk_pager_deallocate(memory_object_t mem_obj);
84kern_return_t fourk_pager_init(memory_object_t mem_obj,
85 memory_object_control_t control,
86 memory_object_cluster_size_t pg_size);
87kern_return_t fourk_pager_terminate(memory_object_t mem_obj);
88kern_return_t fourk_pager_data_request(memory_object_t mem_obj,
89 memory_object_offset_t offset,
90 memory_object_cluster_size_t length,
91 vm_prot_t protection_required,
92 memory_object_fault_info_t fault_info);
93kern_return_t fourk_pager_data_return(memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t data_cnt,
96 memory_object_offset_t *resid_offset,
97 int *io_error,
98 boolean_t dirty,
99 boolean_t kernel_copy,
100 int upl_flags);
101kern_return_t fourk_pager_data_initialize(memory_object_t mem_obj,
102 memory_object_offset_t offset,
103 memory_object_cluster_size_t data_cnt);
104kern_return_t fourk_pager_data_unlock(memory_object_t mem_obj,
105 memory_object_offset_t offset,
106 memory_object_size_t size,
107 vm_prot_t desired_access);
108kern_return_t fourk_pager_synchronize(memory_object_t mem_obj,
109 memory_object_offset_t offset,
110 memory_object_size_t length,
111 vm_sync_t sync_flags);
112kern_return_t fourk_pager_map(memory_object_t mem_obj,
113 vm_prot_t prot);
114kern_return_t fourk_pager_last_unmap(memory_object_t mem_obj);
115
116/*
117 * Vector of VM operations for this EMM.
118 * These routines are invoked by VM via the memory_object_*() interfaces.
119 */
120const struct memory_object_pager_ops fourk_pager_ops = {
121 fourk_pager_reference,
122 fourk_pager_deallocate,
123 fourk_pager_init,
124 fourk_pager_terminate,
125 fourk_pager_data_request,
126 fourk_pager_data_return,
127 fourk_pager_data_initialize,
128 fourk_pager_data_unlock,
129 fourk_pager_synchronize,
130 fourk_pager_map,
131 fourk_pager_last_unmap,
132 NULL, /* data_reclaim */
133 "fourk_pager"
134};
135
136/*
137 * The "fourk_pager" describes a memory object backed by
138 * the "4K" EMM.
139 */
140#define FOURK_PAGER_SLOTS 4 /* 16K / 4K */
141typedef struct fourk_pager_backing {
142 vm_object_t backing_object;
143 vm_object_offset_t backing_offset;
144} *fourk_pager_backing_t;
145typedef struct fourk_pager {
146 struct ipc_object_header pager_header; /* fake ip_kotype() */
147 memory_object_pager_ops_t pager_ops; /* == &fourk_pager_ops */
148 memory_object_control_t pager_control; /* mem object control handle */
149 queue_chain_t pager_queue; /* next & prev pagers */
150 unsigned int ref_count; /* reference count */
151 int is_ready; /* is this pager ready ? */
152 int is_mapped; /* is this mem_obj mapped ? */
153 struct fourk_pager_backing slots[FOURK_PAGER_SLOTS]; /* backing for each
154 4K-chunk */
155} *fourk_pager_t;
156#define FOURK_PAGER_NULL ((fourk_pager_t) NULL)
157#define pager_ikot pager_header.io_bits
158
159/*
160 * List of memory objects managed by this EMM.
161 * The list is protected by the "fourk_pager_lock" lock.
162 */
163int fourk_pager_count = 0; /* number of pagers */
164int fourk_pager_count_mapped = 0; /* number of mapped pagers */
165queue_head_t fourk_pager_queue;
166decl_lck_mtx_data(,fourk_pager_lock)
167
168/*
169 * Maximum number of unmapped pagers we're willing to keep around.
170 */
171int fourk_pager_cache_limit = 0;
172
173/*
174 * Statistics & counters.
175 */
176int fourk_pager_count_max = 0;
177int fourk_pager_count_unmapped_max = 0;
178int fourk_pager_num_trim_max = 0;
179int fourk_pager_num_trim_total = 0;
180
181
182lck_grp_t fourk_pager_lck_grp;
183lck_grp_attr_t fourk_pager_lck_grp_attr;
184lck_attr_t fourk_pager_lck_attr;
185
186
187/* internal prototypes */
188fourk_pager_t fourk_pager_lookup(memory_object_t mem_obj);
189void fourk_pager_dequeue(fourk_pager_t pager);
190void fourk_pager_deallocate_internal(fourk_pager_t pager,
191 boolean_t locked);
192void fourk_pager_terminate_internal(fourk_pager_t pager);
193void fourk_pager_trim(void);
194
195
196#if DEBUG
197int fourk_pagerdebug = 0;
198#define PAGER_ALL 0xffffffff
199#define PAGER_INIT 0x00000001
200#define PAGER_PAGEIN 0x00000002
201
202#define PAGER_DEBUG(LEVEL, A) \
203 MACRO_BEGIN \
204 if ((fourk_pagerdebug & LEVEL)==LEVEL) { \
205 printf A; \
206 } \
207 MACRO_END
208#else
209#define PAGER_DEBUG(LEVEL, A)
210#endif
211
212
213void
214fourk_pager_bootstrap(void)
215{
216 lck_grp_attr_setdefault(&fourk_pager_lck_grp_attr);
217 lck_grp_init(&fourk_pager_lck_grp, "4K-pager", &fourk_pager_lck_grp_attr);
218 lck_attr_setdefault(&fourk_pager_lck_attr);
219 lck_mtx_init(&fourk_pager_lock, &fourk_pager_lck_grp, &fourk_pager_lck_attr);
220 queue_init(&fourk_pager_queue);
221}
222
223/*
224 * fourk_pager_init()
225 *
 226 * Initializes the memory object and makes it ready to be used and mapped.
227 */
228kern_return_t
229fourk_pager_init(
230 memory_object_t mem_obj,
231 memory_object_control_t control,
232#if !DEBUG
233 __unused
234#endif
235 memory_object_cluster_size_t pg_size)
236{
237 fourk_pager_t pager;
238 kern_return_t kr;
239 memory_object_attr_info_data_t attributes;
240
241 PAGER_DEBUG(PAGER_ALL,
242 ("fourk_pager_init: %p, %p, %x\n",
243 mem_obj, control, pg_size));
244
245 if (control == MEMORY_OBJECT_CONTROL_NULL)
246 return KERN_INVALID_ARGUMENT;
247
248 pager = fourk_pager_lookup(mem_obj);
249
250 memory_object_control_reference(control);
251
252 pager->pager_control = control;
253
254 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
255 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
256 attributes.cluster_size = (1 << (PAGE_SHIFT));
257 attributes.may_cache_object = FALSE;
258 attributes.temporary = TRUE;
259
260 kr = memory_object_change_attributes(
261 control,
262 MEMORY_OBJECT_ATTRIBUTE_INFO,
263 (memory_object_info_t) &attributes,
264 MEMORY_OBJECT_ATTR_INFO_COUNT);
265 if (kr != KERN_SUCCESS)
266 panic("fourk_pager_init: "
267 "memory_object_change_attributes() failed");
268
269#if CONFIG_SECLUDED_MEMORY
270 if (secluded_for_filecache) {
271 memory_object_mark_eligible_for_secluded(control, TRUE);
272 }
273#endif /* CONFIG_SECLUDED_MEMORY */
274
275 return KERN_SUCCESS;
276}
277
278/*
279 * fourk_pager_data_return()
280 *
281 * Handles page-out requests from VM. This should never happen since
282 * the pages provided by this EMM are not supposed to be dirty or dirtied
283 * and VM should simply discard the contents and reclaim the pages if it
284 * needs to.
285 */
286kern_return_t
287fourk_pager_data_return(
288 __unused memory_object_t mem_obj,
289 __unused memory_object_offset_t offset,
290 __unused memory_object_cluster_size_t data_cnt,
291 __unused memory_object_offset_t *resid_offset,
292 __unused int *io_error,
293 __unused boolean_t dirty,
294 __unused boolean_t kernel_copy,
295 __unused int upl_flags)
296{
297 panic("fourk_pager_data_return: should never get called");
298 return KERN_FAILURE;
299}
300
301kern_return_t
302fourk_pager_data_initialize(
303 __unused memory_object_t mem_obj,
304 __unused memory_object_offset_t offset,
305 __unused memory_object_cluster_size_t data_cnt)
306{
307 panic("fourk_pager_data_initialize: should never get called");
308 return KERN_FAILURE;
309}
310
311kern_return_t
312fourk_pager_data_unlock(
313 __unused memory_object_t mem_obj,
314 __unused memory_object_offset_t offset,
315 __unused memory_object_size_t size,
316 __unused vm_prot_t desired_access)
317{
318 return KERN_FAILURE;
319}
320
321/*
322 * fourk_pager_reference()
323 *
324 * Get a reference on this memory object.
325 * For external usage only. Assumes that the initial reference count is not 0,
 326 * i.e. one should not "revive" a dead pager this way.
327 */
328void
329fourk_pager_reference(
330 memory_object_t mem_obj)
331{
332 fourk_pager_t pager;
333
334 pager = fourk_pager_lookup(mem_obj);
335
336 lck_mtx_lock(&fourk_pager_lock);
337 assert(pager->ref_count > 0);
338 pager->ref_count++;
339 lck_mtx_unlock(&fourk_pager_lock);
340}
341
342
343/*
344 * fourk_pager_dequeue:
345 *
346 * Removes a pager from the list of pagers.
347 *
348 * The caller must hold "fourk_pager_lock".
349 */
350void
351fourk_pager_dequeue(
352 fourk_pager_t pager)
353{
354 assert(!pager->is_mapped);
355
356 queue_remove(&fourk_pager_queue,
357 pager,
358 fourk_pager_t,
359 pager_queue);
360 pager->pager_queue.next = NULL;
361 pager->pager_queue.prev = NULL;
362
363 fourk_pager_count--;
364}
365
366/*
367 * fourk_pager_terminate_internal:
368 *
369 * Trigger the asynchronous termination of the memory object associated
370 * with this pager.
371 * When the memory object is terminated, there will be one more call
372 * to memory_object_deallocate() (i.e. fourk_pager_deallocate())
373 * to finish the clean up.
374 *
375 * "fourk_pager_lock" should not be held by the caller.
376 * We don't need the lock because the pager has already been removed from
377 * the pagers' list and is now ours exclusively.
378 */
379void
380fourk_pager_terminate_internal(
381 fourk_pager_t pager)
382{
383 int i;
384
385 assert(pager->is_ready);
386 assert(!pager->is_mapped);
387
388 for (i = 0; i < FOURK_PAGER_SLOTS; i++) {
389 if (pager->slots[i].backing_object != VM_OBJECT_NULL &&
390 pager->slots[i].backing_object != (vm_object_t) -1) {
391 vm_object_deallocate(pager->slots[i].backing_object);
392 pager->slots[i].backing_object = (vm_object_t) -1;
393 pager->slots[i].backing_offset = (vm_object_offset_t) -1;
394 }
395 }
396
397 /* trigger the destruction of the memory object */
398 memory_object_destroy(pager->pager_control, 0);
399}
400
401/*
402 * fourk_pager_deallocate_internal()
403 *
404 * Release a reference on this pager and free it when the last
405 * reference goes away.
406 * Can be called with fourk_pager_lock held or not but always returns
407 * with it unlocked.
408 */
409void
410fourk_pager_deallocate_internal(
411 fourk_pager_t pager,
412 boolean_t locked)
413{
414 boolean_t needs_trimming;
415 int count_unmapped;
416
417 if (! locked) {
418 lck_mtx_lock(&fourk_pager_lock);
419 }
420
421 count_unmapped = (fourk_pager_count -
422 fourk_pager_count_mapped);
423 if (count_unmapped > fourk_pager_cache_limit) {
424 /* we have too many unmapped pagers: trim some */
425 needs_trimming = TRUE;
426 } else {
427 needs_trimming = FALSE;
428 }
429
430 /* drop a reference on this pager */
431 pager->ref_count--;
432
433 if (pager->ref_count == 1) {
434 /*
435 * Only the "named" reference is left, which means that
436 * no one is really holding on to this pager anymore.
437 * Terminate it.
438 */
439 fourk_pager_dequeue(pager);
440 /* the pager is all ours: no need for the lock now */
441 lck_mtx_unlock(&fourk_pager_lock);
442 fourk_pager_terminate_internal(pager);
443 } else if (pager->ref_count == 0) {
444 /*
445 * Dropped the existence reference; the memory object has
446 * been terminated. Do some final cleanup and release the
447 * pager structure.
448 */
449 lck_mtx_unlock(&fourk_pager_lock);
450 if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
451 memory_object_control_deallocate(pager->pager_control);
452 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
453 }
454 kfree(pager, sizeof (*pager));
455 pager = FOURK_PAGER_NULL;
456 } else {
457 /* there are still plenty of references: keep going... */
458 lck_mtx_unlock(&fourk_pager_lock);
459 }
460
461 if (needs_trimming) {
462 fourk_pager_trim();
463 }
464 /* caution: lock is not held on return... */
465}
466
467/*
468 * fourk_pager_deallocate()
469 *
470 * Release a reference on this pager and free it when the last
471 * reference goes away.
472 */
473void
474fourk_pager_deallocate(
475 memory_object_t mem_obj)
476{
477 fourk_pager_t pager;
478
479 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_deallocate: %p\n", mem_obj));
480 pager = fourk_pager_lookup(mem_obj);
481 fourk_pager_deallocate_internal(pager, FALSE);
482}
483
484/*
485 *
486 */
487kern_return_t
488fourk_pager_terminate(
489#if !DEBUG
490 __unused
491#endif
492 memory_object_t mem_obj)
493{
494 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_terminate: %p\n", mem_obj));
495
496 return KERN_SUCCESS;
497}
498
499/*
500 *
501 */
502kern_return_t
503fourk_pager_synchronize(
504 memory_object_t mem_obj,
505 memory_object_offset_t offset,
506 memory_object_size_t length,
507 __unused vm_sync_t sync_flags)
508{
509 fourk_pager_t pager;
510
511 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_synchronize: %p\n", mem_obj));
512
513 pager = fourk_pager_lookup(mem_obj);
514
515 memory_object_synchronize_completed(pager->pager_control,
516 offset, length);
517
518 return KERN_SUCCESS;
519}
520
521/*
522 * fourk_pager_map()
523 *
524 * This allows VM to let us, the EMM, know that this memory object
525 * is currently mapped one or more times. This is called by VM each time
526 * the memory object gets mapped and we take one extra reference on the
527 * memory object to account for all its mappings.
528 */
529kern_return_t
530fourk_pager_map(
531 memory_object_t mem_obj,
532 __unused vm_prot_t prot)
533{
534 fourk_pager_t pager;
535
536 PAGER_DEBUG(PAGER_ALL, ("fourk_pager_map: %p\n", mem_obj));
537
538 pager = fourk_pager_lookup(mem_obj);
539
540 lck_mtx_lock(&fourk_pager_lock);
541 assert(pager->is_ready);
542 assert(pager->ref_count > 0); /* pager is alive */
543 if (pager->is_mapped == FALSE) {
544 /*
545 * First mapping of this pager: take an extra reference
546 * that will remain until all the mappings of this pager
547 * are removed.
548 */
549 pager->is_mapped = TRUE;
550 pager->ref_count++;
551 fourk_pager_count_mapped++;
552 }
553 lck_mtx_unlock(&fourk_pager_lock);
554
555 return KERN_SUCCESS;
556}
557
558/*
559 * fourk_pager_last_unmap()
560 *
561 * This is called by VM when this memory object is no longer mapped anywhere.
562 */
563kern_return_t
564fourk_pager_last_unmap(
565 memory_object_t mem_obj)
566{
567 fourk_pager_t pager;
568 int count_unmapped;
569
570 PAGER_DEBUG(PAGER_ALL,
571 ("fourk_pager_last_unmap: %p\n", mem_obj));
572
573 pager = fourk_pager_lookup(mem_obj);
574
575 lck_mtx_lock(&fourk_pager_lock);
576 if (pager->is_mapped) {
577 /*
578 * All the mappings are gone, so let go of the one extra
579 * reference that represents all the mappings of this pager.
580 */
581 fourk_pager_count_mapped--;
582 count_unmapped = (fourk_pager_count -
583 fourk_pager_count_mapped);
584 if (count_unmapped > fourk_pager_count_unmapped_max) {
585 fourk_pager_count_unmapped_max = count_unmapped;
586 }
587 pager->is_mapped = FALSE;
588 fourk_pager_deallocate_internal(pager, TRUE);
589 /* caution: deallocate_internal() released the lock ! */
590 } else {
591 lck_mtx_unlock(&fourk_pager_lock);
592 }
593
594 return KERN_SUCCESS;
595}
596
597
598/*
599 *
600 */
601fourk_pager_t
602fourk_pager_lookup(
603 memory_object_t mem_obj)
604{
605 fourk_pager_t pager;
606
607 pager = (fourk_pager_t) mem_obj;
608 assert(pager->pager_ops == &fourk_pager_ops);
609 assert(pager->ref_count > 0);
610 return pager;
611}
612
613void
614fourk_pager_trim(void)
615{
616 fourk_pager_t pager, prev_pager;
617 queue_head_t trim_queue;
618 int num_trim;
619 int count_unmapped;
620
621 lck_mtx_lock(&fourk_pager_lock);
622
623 /*
 624 * We have too many pagers: try to trim some unused ones,
625 * starting with the oldest pager at the end of the queue.
626 */
627 queue_init(&trim_queue);
628 num_trim = 0;
629
630 for (pager = (fourk_pager_t)
631 queue_last(&fourk_pager_queue);
632 !queue_end(&fourk_pager_queue,
633 (queue_entry_t) pager);
634 pager = prev_pager) {
635 /* get prev elt before we dequeue */
636 prev_pager = (fourk_pager_t)
637 queue_prev(&pager->pager_queue);
638
639 if (pager->ref_count == 2 &&
640 pager->is_ready &&
641 !pager->is_mapped) {
642 /* this pager can be trimmed */
643 num_trim++;
644 /* remove this pager from the main list ... */
645 fourk_pager_dequeue(pager);
646 /* ... and add it to our trim queue */
647 queue_enter_first(&trim_queue,
648 pager,
649 fourk_pager_t,
650 pager_queue);
651
652 count_unmapped = (fourk_pager_count -
653 fourk_pager_count_mapped);
654 if (count_unmapped <= fourk_pager_cache_limit) {
 655 /* we have trimmed enough pagers */
656 break;
657 }
658 }
659 }
660 if (num_trim > fourk_pager_num_trim_max) {
661 fourk_pager_num_trim_max = num_trim;
662 }
663 fourk_pager_num_trim_total += num_trim;
664
665 lck_mtx_unlock(&fourk_pager_lock);
666
667 /* terminate the trimmed pagers */
668 while (!queue_empty(&trim_queue)) {
669 queue_remove_first(&trim_queue,
670 pager,
671 fourk_pager_t,
672 pager_queue);
673 pager->pager_queue.next = NULL;
674 pager->pager_queue.prev = NULL;
675 assert(pager->ref_count == 2);
676 /*
677 * We can't call deallocate_internal() because the pager
678 * has already been dequeued, but we still need to remove
679 * a reference.
680 */
681 pager->ref_count--;
682 fourk_pager_terminate_internal(pager);
683 }
684}
685
686
687
688
689
690
691vm_object_t
692fourk_pager_to_vm_object(
693 memory_object_t mem_obj)
694{
695 fourk_pager_t pager;
696 vm_object_t object;
697
698 pager = fourk_pager_lookup(mem_obj);
699 if (pager == NULL) {
700 return VM_OBJECT_NULL;
701 }
702
703 assert(pager->ref_count > 0);
704 assert(pager->pager_control != MEMORY_OBJECT_CONTROL_NULL);
705 object = memory_object_control_to_vm_object(pager->pager_control);
706 assert(object != VM_OBJECT_NULL);
707 return object;
708}
709
710memory_object_t
711fourk_pager_create(void)
712{
713 fourk_pager_t pager;
714 memory_object_control_t control;
715 kern_return_t kr;
716 int i;
717
718#if 00
719 if (PAGE_SIZE_64 == FOURK_PAGE_SIZE) {
720 panic("fourk_pager_create: page size is 4K !?");
721 }
722#endif
723
724 pager = (fourk_pager_t) kalloc(sizeof (*pager));
725 if (pager == FOURK_PAGER_NULL) {
726 return MEMORY_OBJECT_NULL;
727 }
728 bzero(pager, sizeof (*pager));
729
730 /*
731 * The vm_map call takes both named entry ports and raw memory
732 * objects in the same parameter. We need to make sure that
733 * vm_map does not see this object as a named entry port. So,
734 * we reserve the first word in the object for a fake ip_kotype
735 * setting - that will tell vm_map to use it as a memory object.
736 */
737 pager->pager_ops = &fourk_pager_ops;
738 pager->pager_ikot = IKOT_MEMORY_OBJECT;
739 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
740 pager->ref_count = 2; /* existence + setup reference */
741 pager->is_ready = FALSE;/* not ready until it has a "name" */
742 pager->is_mapped = FALSE;
743
744 for (i = 0; i < FOURK_PAGER_SLOTS; i++) {
745 pager->slots[i].backing_object = (vm_object_t) -1;
746 pager->slots[i].backing_offset = (vm_object_offset_t) -1;
747 }
748
749 lck_mtx_lock(&fourk_pager_lock);
750
751 /* enter new pager at the head of our list of pagers */
752 queue_enter_first(&fourk_pager_queue,
753 pager,
754 fourk_pager_t,
755 pager_queue);
756 fourk_pager_count++;
757 if (fourk_pager_count > fourk_pager_count_max) {
758 fourk_pager_count_max = fourk_pager_count;
759 }
760 lck_mtx_unlock(&fourk_pager_lock);
761
762 kr = memory_object_create_named((memory_object_t) pager,
763 0,
764 &control);
765 assert(kr == KERN_SUCCESS);
766
767 lck_mtx_lock(&fourk_pager_lock);
768 /* the new pager is now ready to be used */
769 pager->is_ready = TRUE;
770 lck_mtx_unlock(&fourk_pager_lock);
771
772 /* wakeup anyone waiting for this pager to be ready */
773 thread_wakeup(&pager->is_ready);
774
775 return (memory_object_t) pager;
776}
777
778/*
779 * fourk_pager_data_request()
780 *
781 * Handles page-in requests from VM.
782 */
783int fourk_pager_data_request_debug = 0;
784kern_return_t
785fourk_pager_data_request(
786 memory_object_t mem_obj,
787 memory_object_offset_t offset,
788 memory_object_cluster_size_t length,
789#if !DEBUG
790 __unused
791#endif
792 vm_prot_t protection_required,
793 memory_object_fault_info_t mo_fault_info)
794{
795 fourk_pager_t pager;
796 memory_object_control_t mo_control;
797 upl_t upl;
798 int upl_flags;
799 upl_size_t upl_size;
800 upl_page_info_t *upl_pl;
801 unsigned int pl_count;
802 vm_object_t dst_object;
803 kern_return_t kr, retval;
804 vm_map_offset_t kernel_mapping;
805 vm_offset_t src_vaddr, dst_vaddr;
806 vm_offset_t cur_offset;
807 int sub_page;
808 int sub_page_idx, sub_page_cnt;
809
810 pager = fourk_pager_lookup(mem_obj);
811 assert(pager->is_ready);
812 assert(pager->ref_count > 1); /* pager is alive and mapped */
813
814 PAGER_DEBUG(PAGER_PAGEIN, ("fourk_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
815
816 retval = KERN_SUCCESS;
817 kernel_mapping = 0;
818
819 offset = memory_object_trunc_page(offset);
820
821 /*
822 * Gather in a UPL all the VM pages requested by VM.
823 */
824 mo_control = pager->pager_control;
825
826 upl_size = length;
827 upl_flags =
828 UPL_RET_ONLY_ABSENT |
829 UPL_SET_LITE |
830 UPL_NO_SYNC |
831 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
832 UPL_SET_INTERNAL;
833 pl_count = 0;
834 kr = memory_object_upl_request(mo_control,
835 offset, upl_size,
836 &upl, NULL, NULL, upl_flags);
837 if (kr != KERN_SUCCESS) {
838 retval = kr;
839 goto done;
840 }
841 dst_object = mo_control->moc_object;
842 assert(dst_object != VM_OBJECT_NULL);
843
844#if __x86_64__ || __arm__ || __arm64__
845 /* use the 1-to-1 mapping of physical memory */
846#else /* __x86_64__ || __arm__ || __arm64__ */
847 /*
848 * Reserve 2 virtual pages in the kernel address space to map the
849 * source and destination physical pages when it's their turn to
850 * be processed.
851 */
852 vm_map_entry_t map_entry;
853
854 vm_object_reference(kernel_object); /* ref. for mapping */
855 kr = vm_map_find_space(kernel_map,
856 &kernel_mapping,
857 2 * PAGE_SIZE_64,
858 0,
859 0,
860 &map_entry);
861 if (kr != KERN_SUCCESS) {
862 vm_object_deallocate(kernel_object);
863 retval = kr;
864 goto done;
865 }
866 map_entry->object.vm_object = kernel_object;
867 map_entry->offset = kernel_mapping;
868 vm_map_unlock(kernel_map);
869 src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
870 dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
871#endif /* __x86_64__ || __arm__ || __arm64__ */
872
873 /*
874 * Fill in the contents of the pages requested by VM.
875 */
876 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
877 pl_count = length / PAGE_SIZE;
878 for (cur_offset = 0;
879 retval == KERN_SUCCESS && cur_offset < length;
880 cur_offset += PAGE_SIZE) {
881 ppnum_t dst_pnum;
882 int num_subpg_signed, num_subpg_validated;
883 int num_subpg_tainted, num_subpg_nx;
884
885 if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
886 /* this page is not in the UPL: skip it */
887 continue;
888 }
889
890 /*
891 * Establish an explicit pmap mapping of the destination
892 * physical page.
893 * We can't do a regular VM mapping because the VM page
894 * is "busy".
895 */
896 dst_pnum = (ppnum_t)
897 upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
898 assert(dst_pnum != 0);
899#if __x86_64__
900 dst_vaddr = (vm_map_offset_t)
901 PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
902#else
903 pmap_enter(kernel_pmap,
904 dst_vaddr,
905 dst_pnum,
906 VM_PROT_READ | VM_PROT_WRITE,
907 VM_PROT_NONE,
908 0,
909 TRUE);
910#endif
911
912 /* retrieve appropriate data for each 4K-page in this page */
913 if (PAGE_SHIFT == FOURK_PAGE_SHIFT &&
914 page_shift_user32 == SIXTEENK_PAGE_SHIFT) {
915 /*
916 * Find the slot for the requested 4KB page in
917 * the 16K page...
918 */
919 assert(PAGE_SHIFT == FOURK_PAGE_SHIFT);
920 assert(page_shift_user32 == SIXTEENK_PAGE_SHIFT);
921 sub_page_idx = ((offset & SIXTEENK_PAGE_MASK) /
922 PAGE_SIZE);
923 /*
924 * ... and provide only that one 4KB page.
925 */
926 sub_page_cnt = 1;
927 } else {
928 /*
929 * Iterate over all slots, i.e. retrieve all four 4KB
930 * pages in the requested 16KB page.
931 */
932 assert(PAGE_SHIFT == SIXTEENK_PAGE_SHIFT);
933 sub_page_idx = 0;
934 sub_page_cnt = FOURK_PAGER_SLOTS;
935 }
936
937 num_subpg_signed = 0;
938 num_subpg_validated = 0;
939 num_subpg_tainted = 0;
940 num_subpg_nx = 0;
941
942 /* retrieve appropriate data for each 4K-page in this page */
943 for (sub_page = sub_page_idx;
944 sub_page < sub_page_idx + sub_page_cnt;
945 sub_page++) {
946 vm_object_t src_object;
947 memory_object_offset_t src_offset;
948 vm_offset_t offset_in_src_page;
949 kern_return_t error_code;
 950 vm_object_t src_page_object;
951 vm_page_t src_page;
952 vm_page_t top_page;
953 vm_prot_t prot;
954 int interruptible;
955 struct vm_object_fault_info fault_info;
956 boolean_t subpg_validated;
957 unsigned subpg_tainted;
958
959
960 if (offset < SIXTEENK_PAGE_SIZE) {
961 /*
962 * The 1st 16K-page can cover multiple
963 * sub-mappings, as described in the
964 * pager->slots[] array.
965 */
966 src_object =
967 pager->slots[sub_page].backing_object;
968 src_offset =
969 pager->slots[sub_page].backing_offset;
970 } else {
971 fourk_pager_backing_t slot;
972
973 /*
974 * Beyond the 1st 16K-page in the pager is
975 * an extension of the last "sub page" in
976 * the pager->slots[] array.
977 */
978 slot = &pager->slots[FOURK_PAGER_SLOTS-1];
979 src_object = slot->backing_object;
980 src_offset = slot->backing_offset;
981 src_offset += FOURK_PAGE_SIZE;
982 src_offset +=
983 (vm_map_trunc_page(offset,
984 SIXTEENK_PAGE_MASK)
985 - SIXTEENK_PAGE_SIZE);
986 src_offset += sub_page * FOURK_PAGE_SIZE;
987 }
988 offset_in_src_page = src_offset & PAGE_MASK_64;
989 src_offset = vm_object_trunc_page(src_offset);
990
991 if (src_object == VM_OBJECT_NULL ||
992 src_object == (vm_object_t) -1) {
993 /* zero-fill */
994 bzero((char *)(dst_vaddr +
995 ((sub_page-sub_page_idx)
996 * FOURK_PAGE_SIZE)),
997 FOURK_PAGE_SIZE);
998 if (fourk_pager_data_request_debug) {
999 printf("fourk_pager_data_request"
1000 "(%p,0x%llx+0x%lx+0x%04x): "
1001 "ZERO\n",
1002 pager,
1003 offset,
1004 cur_offset,
1005 ((sub_page - sub_page_idx)
1006 * FOURK_PAGE_SIZE));
1007 }
1008 continue;
1009 }
1010
1011 /* fault in the source page from src_object */
1012 retry_src_fault:
1013 src_page = VM_PAGE_NULL;
1014 top_page = VM_PAGE_NULL;
1015 fault_info = *((struct vm_object_fault_info *)
1016 (uintptr_t)mo_fault_info);
1017 fault_info.stealth = TRUE;
1018 fault_info.io_sync = FALSE;
1019 fault_info.mark_zf_absent = FALSE;
1020 fault_info.batch_pmap_op = FALSE;
1021 interruptible = fault_info.interruptible;
1022 prot = VM_PROT_READ;
1023 error_code = 0;
1024
1025 vm_object_lock(src_object);
1026 vm_object_paging_begin(src_object);
1027 kr = vm_fault_page(src_object,
1028 src_offset,
1029 VM_PROT_READ,
1030 FALSE,
1031 FALSE, /* src_page not looked up */
1032 &prot,
1033 &src_page,
1034 &top_page,
1035 NULL,
1036 &error_code,
1037 FALSE,
1038 FALSE,
1039 &fault_info);
1040 switch (kr) {
1041 case VM_FAULT_SUCCESS:
1042 break;
1043 case VM_FAULT_RETRY:
1044 goto retry_src_fault;
1045 case VM_FAULT_MEMORY_SHORTAGE:
1046 if (vm_page_wait(interruptible)) {
1047 goto retry_src_fault;
1048 }
1049 /* fall thru */
1050 case VM_FAULT_INTERRUPTED:
1051 retval = MACH_SEND_INTERRUPTED;
1052 goto src_fault_done;
1053 case VM_FAULT_SUCCESS_NO_VM_PAGE:
1054 /* success but no VM page: fail */
1055 vm_object_paging_end(src_object);
1056 vm_object_unlock(src_object);
1057 /*FALLTHROUGH*/
1058 case VM_FAULT_MEMORY_ERROR:
1059 /* the page is not there! */
1060 if (error_code) {
1061 retval = error_code;
1062 } else {
1063 retval = KERN_MEMORY_ERROR;
1064 }
1065 goto src_fault_done;
1066 default:
1067 panic("fourk_pager_data_request: "
1068 "vm_fault_page() unexpected error 0x%x\n",
1069 kr);
1070 }
1071 assert(src_page != VM_PAGE_NULL);
1072 assert(src_page->busy);
1073
1074 src_page_object = VM_PAGE_OBJECT(src_page);
1075
1076 if (( !VM_PAGE_PAGEABLE(src_page)) &&
1077 !VM_PAGE_WIRED(src_page)) {
1078 vm_page_lockspin_queues();
1079 if (( !VM_PAGE_PAGEABLE(src_page)) &&
1080 !VM_PAGE_WIRED(src_page)) {
1081 vm_page_deactivate(src_page);
1082 }
1083 vm_page_unlock_queues();
1084 }
1085
1086#if __x86_64__
1087 src_vaddr = (vm_map_offset_t)
1088 PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
1089 << PAGE_SHIFT);
1090#else
1091 /*
1092 * Establish an explicit mapping of the source
1093 * physical page.
1094 */
1095 pmap_enter(kernel_pmap,
1096 src_vaddr,
1097 VM_PAGE_GET_PHYS_PAGE(src_page),
1098 VM_PROT_READ,
1099 VM_PROT_NONE,
1100 0,
1101 TRUE);
1102#endif
1103
1104 /*
1105 * Validate the 4K page we want from
1106 * this source page...
1107 */
1108 subpg_validated = FALSE;
1109 subpg_tainted = 0;
1110 if (src_page_object->code_signed) {
1111 vm_page_validate_cs_mapped_chunk(
1112 src_page,
1113 (const void *) src_vaddr,
1114 offset_in_src_page,
1115 FOURK_PAGE_SIZE,
1116 &subpg_validated,
1117 &subpg_tainted);
1118 num_subpg_signed++;
1119 if (subpg_validated) {
1120 num_subpg_validated++;
1121 }
1122 if (subpg_tainted & CS_VALIDATE_TAINTED) {
1123 num_subpg_tainted++;
1124 }
1125 if (subpg_tainted & CS_VALIDATE_NX) {
1126 /* subpg should not be executable */
1127 if (sub_page_cnt > 1) {
1128 /*
1129 * The destination page has
1130 * more than 1 subpage and its
1131 * other subpages might need
1132 * EXEC, so we do not propagate
1133 * CS_VALIDATE_NX to the
1134 * destination page...
1135 */
1136 } else {
1137 num_subpg_nx++;
1138 }
1139 }
1140 }
1141
1142 /*
1143 * Copy the relevant portion of the source page
1144 * into the appropriate part of the destination page.
1145 */
1146 bcopy((const char *)(src_vaddr + offset_in_src_page),
1147 (char *)(dst_vaddr +
1148 ((sub_page - sub_page_idx) *
1149 FOURK_PAGE_SIZE)),
1150 FOURK_PAGE_SIZE);
1151 if (fourk_pager_data_request_debug) {
1152 printf("fourk_data_request"
1153 "(%p,0x%llx+0x%lx+0x%04x): "
1154 "backed by [%p:0x%llx]: "
1155 "[0x%016llx 0x%016llx] "
1156 "code_signed=%d "
1157 "cs_valid=%d cs_tainted=%d cs_nx=%d\n",
1158 pager,
1159 offset, cur_offset,
1160 (sub_page-sub_page_idx)*FOURK_PAGE_SIZE,
1161 src_page_object,
1162 src_page->offset + offset_in_src_page,
1163 *(uint64_t *)(dst_vaddr +
1164 ((sub_page-sub_page_idx) *
1165 FOURK_PAGE_SIZE)),
1166 *(uint64_t *)(dst_vaddr +
1167 ((sub_page-sub_page_idx) *
1168 FOURK_PAGE_SIZE) +
1169 8),
1170 src_page_object->code_signed,
1171 subpg_validated,
1172 !!(subpg_tainted & CS_VALIDATE_TAINTED),
1173 !!(subpg_tainted & CS_VALIDATE_NX));
1174 }
1175
1176#if __x86_64__ || __arm__ || __arm64__
1177 /* we used the 1-to-1 mapping of physical memory */
1178 src_vaddr = 0;
1179#else /* __x86_64__ || __arm__ || __arm64__ */
1180 /*
1181 * Remove the pmap mapping of the source page
1182 * in the kernel.
1183 */
1184 pmap_remove(kernel_pmap,
1185 (addr64_t) src_vaddr,
1186 (addr64_t) src_vaddr + PAGE_SIZE_64);
1187#endif /* __x86_64__ || __arm__ || __arm64__ */
1188
1189 src_fault_done:
1190 /*
1191 * Cleanup the result of vm_fault_page().
1192 */
1193 if (src_page) {
1194 assert(VM_PAGE_OBJECT(src_page) == src_page_object);
1195
1196 PAGE_WAKEUP_DONE(src_page);
1197 src_page = VM_PAGE_NULL;
1198 vm_object_paging_end(src_page_object);
1199 vm_object_unlock(src_page_object);
1200 if (top_page) {
1201 vm_object_t top_object;
1202
1203 top_object = VM_PAGE_OBJECT(top_page);
1204 vm_object_lock(top_object);
1205 VM_PAGE_FREE(top_page);
1206 top_page = VM_PAGE_NULL;
1207 vm_object_paging_end(top_object);
1208 vm_object_unlock(top_object);
1209 }
1210 }
1211 }
1212 if (num_subpg_signed > 0) {
1213 /* some code-signing involved with this 16K page */
1214 if (num_subpg_tainted > 0) {
1215 /* a tainted subpage taints entire 16K page */
1216 UPL_SET_CS_TAINTED(upl_pl,
1217 cur_offset / PAGE_SIZE,
1218 TRUE);
 1219 /* also mark as "validated" for consistency */
1220 UPL_SET_CS_VALIDATED(upl_pl,
1221 cur_offset / PAGE_SIZE,
1222 TRUE);
1223 } else if (num_subpg_validated == num_subpg_signed) {
1224 /*
1225 * All the code-signed 4K subpages of this
1226 * 16K page are validated: our 16K page is
1227 * considered validated.
1228 */
1229 UPL_SET_CS_VALIDATED(upl_pl,
1230 cur_offset / PAGE_SIZE,
1231 TRUE);
1232 }
1233 if (num_subpg_nx > 0) {
1234 UPL_SET_CS_NX(upl_pl,
1235 cur_offset / PAGE_SIZE,
1236 TRUE);
1237 }
1238 }
1239 }
1240
1241done:
1242 if (upl != NULL) {
1243 /* clean up the UPL */
1244
1245 /*
1246 * The pages are currently dirty because we've just been
1247 * writing on them, but as far as we're concerned, they're
1248 * clean since they contain their "original" contents as
1249 * provided by us, the pager.
1250 * Tell the UPL to mark them "clean".
1251 */
1252 upl_clear_dirty(upl, TRUE);
1253
1254 /* abort or commit the UPL */
1255 if (retval != KERN_SUCCESS) {
1256 upl_abort(upl, 0);
1257 if (retval == KERN_ABORTED) {
1258 wait_result_t wait_result;
1259
1260 /*
1261 * We aborted the fault and did not provide
1262 * any contents for the requested pages but
1263 * the pages themselves are not invalid, so
1264 * let's return success and let the caller
1265 * retry the fault, in case it might succeed
1266 * later (when the decryption code is up and
1267 * running in the kernel, for example).
1268 */
1269 retval = KERN_SUCCESS;
1270 /*
1271 * Wait a little bit first to avoid using
1272 * too much CPU time retrying and failing
1273 * the same fault over and over again.
1274 */
1275 wait_result = assert_wait_timeout(
1276 (event_t) fourk_pager_data_request,
1277 THREAD_UNINT,
1278 10000, /* 10ms */
1279 NSEC_PER_USEC);
1280 assert(wait_result == THREAD_WAITING);
1281 wait_result = thread_block(THREAD_CONTINUE_NULL);
1282 assert(wait_result == THREAD_TIMED_OUT);
1283 }
1284 } else {
1285 boolean_t empty;
1286 upl_commit_range(upl, 0, upl->size,
1287 UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
1288 upl_pl, pl_count, &empty);
1289 }
1290
1291 /* and deallocate the UPL */
1292 upl_deallocate(upl);
1293 upl = NULL;
1294 }
1295 if (kernel_mapping != 0) {
1296 /* clean up the mapping of the source and destination pages */
1297 kr = vm_map_remove(kernel_map,
1298 kernel_mapping,
1299 kernel_mapping + (2 * PAGE_SIZE_64),
1300 VM_MAP_NO_FLAGS);
1301 assert(kr == KERN_SUCCESS);
1302 kernel_mapping = 0;
1303 src_vaddr = 0;
1304 dst_vaddr = 0;
1305 }
1306
1307 return retval;
1308}
1309
1310
1311
1312kern_return_t
1313fourk_pager_populate(
1314 memory_object_t mem_obj,
1315 boolean_t overwrite,
1316 int index,
1317 vm_object_t new_backing_object,
1318 vm_object_offset_t new_backing_offset,
1319 vm_object_t *old_backing_object,
1320 vm_object_offset_t *old_backing_offset)
1321{
1322 fourk_pager_t pager;
1323
1324 pager = fourk_pager_lookup(mem_obj);
1325 if (pager == NULL) {
1326 return KERN_INVALID_ARGUMENT;
1327 }
1328
1329 assert(pager->ref_count > 0);
1330 assert(pager->pager_control != MEMORY_OBJECT_CONTROL_NULL);
1331
 1332 if (index < 0 || index >= FOURK_PAGER_SLOTS) {
1333 return KERN_INVALID_ARGUMENT;
1334 }
1335
1336 if (!overwrite &&
1337 (pager->slots[index].backing_object != (vm_object_t) -1 ||
1338 pager->slots[index].backing_offset != (vm_object_offset_t) -1)) {
1339 return KERN_INVALID_ADDRESS;
1340 }
1341
1342 *old_backing_object = pager->slots[index].backing_object;
1343 *old_backing_offset = pager->slots[index].backing_offset;
1344
1345 pager->slots[index].backing_object = new_backing_object;
1346 pager->slots[index].backing_offset = new_backing_offset;
1347
1348 return KERN_SUCCESS;
1349}
1350