+ kern_return_t kr;
+ vm_map_offset_t page_map_offset;
+ vm_map_size_t map_size;
+ vm_object_offset_t object_offset;
+ int i;
+
+ if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
+ assert(page->busy);
+ /*
+ * Use one of the pre-allocated kernel virtual addresses
+ * and just enter the VM page in the kernel address space
+ * at that virtual address.
+ */
+ simple_lock(&vm_paging_lock);
+
+ /*
+ * Try and find an available kernel virtual address
+ * from our pre-allocated pool.
+ */
+ page_map_offset = 0;
+ for (;;) {
+ for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
+ if (vm_paging_page_inuse[i] == FALSE) {
+ page_map_offset =
+ vm_paging_base_address +
+ (i * PAGE_SIZE);
+ break;
+ }
+ }
+ if (page_map_offset != 0) {
+ /* found a space to map our page ! */
+ break;
+ }
+
+ if (can_unlock_object) {
+ /*
+ * If we can afford to unlock the VM object,
+ * let's take the slow path now...
+ */
+ break;
+ }
+ /*
+ * We can't afford to unlock the VM object, so
+ * let's wait for a space to become available...
+ */
+ vm_paging_page_waiter_total++;
+ vm_paging_page_waiter++;
+ thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
+ &vm_paging_lock,
+ THREAD_UNINT);
+ vm_paging_page_waiter--;
+ /* ... and try again */
+ }
+
+ if (page_map_offset != 0) {
+ /*
+ * We found a kernel virtual address;
+ * map the physical page to that virtual address.
+ */
+ if (i > vm_paging_max_index) {
+ vm_paging_max_index = i;
+ }
+ vm_paging_page_inuse[i] = TRUE;
+ simple_unlock(&vm_paging_lock);
+
+ if (page->pmapped == FALSE) {
+ pmap_sync_page_data_phys(page->phys_page);
+ }
+ page->pmapped = TRUE;
+
+ /*
+ * Keep the VM object locked over the PMAP_ENTER
+ * and the actual use of the page by the kernel,
+ * or this pmap mapping might get undone by a
+ * vm_object_pmap_protect() call...
+ */
+ PMAP_ENTER(kernel_pmap,
+ page_map_offset,
+ page,
+ VM_PROT_DEFAULT,
+ ((int) page->object->wimg_bits &
+ VM_WIMG_MASK),
+ TRUE);
+ vm_paging_objects_mapped++;
+ vm_paging_pages_mapped++;
+ *address = page_map_offset;
+
+ /* all done and mapped, ready to use ! */
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * We ran out of pre-allocated kernel virtual
+ * addresses. Just map the page in the kernel
+ * the slow and regular way.
+ */
+ vm_paging_no_kernel_page++;
+ simple_unlock(&vm_paging_lock);
+ }
+
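+ /*
+ * From here on we take the slow path, which has to drop the object
+ * lock around vm_map_enter() below; bail out if the caller cannot
+ * tolerate the object being unlocked.
+ */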
+ if (! can_unlock_object) {
+ return KERN_NOT_SUPPORTED;
+ }
+
+ object_offset = vm_object_trunc_page(offset);
+ map_size = vm_map_round_page(*size);
+
+ /*
+ * Try and map the required range of the object
+ * in the kernel_map
+ */
+
+ vm_object_reference_locked(object); /* for the map entry */
+ vm_object_unlock(object);
+
+ kr = vm_map_enter(kernel_map,
+ address,
+ map_size,
+ 0,
+ VM_FLAGS_ANYWHERE,
+ object,
+ object_offset,
+ FALSE,
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_NONE);
+ if (kr != KERN_SUCCESS) {
+ *address = 0;
+ *size = 0;
+ vm_object_deallocate(object); /* for the map entry */
+ vm_object_lock(object);
+ return kr;
+ }
+
+ *size = map_size;
+
+ /*
+ * Enter the mapped pages in the page table now.
+ */
+ vm_object_lock(object);
+ /*
+ * VM object must be kept locked from before PMAP_ENTER()
+ * until after the kernel is done accessing the page(s).
+ * Otherwise, the pmap mappings in the kernel could be
+ * undone by a call to vm_object_pmap_protect().
+ */
+
+ for (page_map_offset = 0;
+ map_size != 0;
+ map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
+ unsigned int cache_attr;
+
+ page = vm_page_lookup(object, offset + page_map_offset);
+ if (page == VM_PAGE_NULL) {
+ printf("vm_paging_map_object: no page !?");
+ vm_object_unlock(object);
+ /* vm_map_remove() takes start and end addresses, not a size */
+ kr = vm_map_remove(kernel_map, *address,
+ *address + *size,
+ VM_MAP_NO_FLAGS);
+ assert(kr == KERN_SUCCESS);
+ *address = 0;
+ *size = 0;
+ vm_object_lock(object);
+ return KERN_MEMORY_ERROR;
+ }
+ if (page->pmapped == FALSE) {
+ pmap_sync_page_data_phys(page->phys_page);
+ }
+ page->pmapped = TRUE;
+ page->wpmapped = TRUE;
+ cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
+
+ //assert(pmap_verify_free(page->phys_page));
+ PMAP_ENTER(kernel_pmap,
+ *address + page_map_offset,
+ page,
+ VM_PROT_DEFAULT,
+ cache_attr,
+ TRUE);
+ }
+
+ vm_paging_objects_mapped_slow++;
+ /* map_size was consumed by the loop above; use the saved *size */
+ vm_paging_pages_mapped_slow += *size / PAGE_SIZE_64;
+
+ return KERN_SUCCESS;
+}
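+
+ /*
+ * Illustrative use of vm_paging_map_object() (a sketch only: the
+ * prototype is not part of this hunk, and the parameter order shown
+ * here, address, page, object, offset, size, can_unlock_object, is
+ * an assumption):
+ *
+ * vm_map_offset_t kaddr = 0;
+ * vm_map_size_t ksize = PAGE_SIZE;
+ *
+ * kr = vm_paging_map_object(&kaddr, page, page->object,
+ * page->offset, &ksize, FALSE);
+ * if (kr == KERN_SUCCESS) {
+ * ... access the page contents through kaddr ...
+ * vm_paging_unmap_object(page->object, kaddr, kaddr + ksize);
+ * }
+ *
+ * With a single busy page and *size == PAGE_SIZE the mapping comes
+ * from the pre-allocated pool and the object stays locked; any other
+ * request falls back to vm_map_enter(), which is only allowed when
+ * can_unlock_object is TRUE.
+ */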
+
+/*
+ * ENCRYPTED SWAP:
+ * vm_paging_unmap_object:
+ * Unmaps some of a VM object's pages from the kernel
+ * virtual address space.
+ * Context:
+ * The VM object is locked, but this lock may be
+ * dropped and re-acquired.
+ */
+void
+vm_paging_unmap_object(
+ vm_object_t object,
+ vm_map_offset_t start,
+ vm_map_offset_t end)
+{
+ kern_return_t kr;
+ int i;
+
+ if ((vm_paging_base_address == 0) ||
+ (start < vm_paging_base_address) ||
+ (end > (vm_paging_base_address
+ + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
+ /*
+ * We didn't use our pre-allocated pool of
+ * kernel virtual addresses. Deallocate the
+ * virtual memory.
+ */
+ if (object != VM_OBJECT_NULL) {
+ vm_object_unlock(object);
+ }
+ kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ }
+ assert(kr == KERN_SUCCESS);
+ } else {
+ /*
+ * We used a kernel virtual address from our
+ * pre-allocated pool. Put it back in the pool
+ * for next time.
+ */
+ assert(end - start == PAGE_SIZE);
+ i = (start - vm_paging_base_address) >> PAGE_SHIFT;
+
+ /* undo the pmap mapping */
+ pmap_remove(kernel_pmap, start, end);
+
+ simple_lock(&vm_paging_lock);
+ vm_paging_page_inuse[i] = FALSE;
+ if (vm_paging_page_waiter) {
+ thread_wakeup(&vm_paging_page_waiter);
+ }
+ simple_unlock(&vm_paging_lock);
+ }
+}
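+
+ /*
+ * A note on the pool bookkeeping above: a mapping that came from the
+ * pre-allocated pool lies at vm_paging_base_address + (i * PAGE_SIZE),
+ * so its slot is recovered with
+ * (start - vm_paging_base_address) >> PAGE_SHIFT; with 4KB pages, for
+ * example, a mapping at vm_paging_base_address + 3 * PAGE_SIZE frees
+ * slot 3. The thread_wakeup() on &vm_paging_page_waiter pairs with the
+ * thread_sleep_fast_usimple_lock() in vm_paging_map_object(), which
+ * sleeps on that same address while waiting for a free slot when it
+ * cannot unlock the VM object.
+ */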
+
+#if CRYPTO
+/*
+ * Encryption data.
+ * "iv" is the "initialization vector". Ideally, we want to
+ * have a different one for each page we encrypt, so that
+ * crackers can't find encryption patterns too easily.
+ */
+#define SWAP_CRYPT_AES_KEY_SIZE 128 /* XXX 192 and 256 don't work ! */
+boolean_t swap_crypt_ctx_initialized = FALSE;
+ aes_32t swap_crypt_key[8]; /* big enough for a 256-bit key */
+aes_ctx swap_crypt_ctx;
+const unsigned char swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
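+
+ /*
+ * SWAP_CRYPT_AES_KEY_SIZE appears to be a key size in bits (the XXX
+ * comment above lists the other standard AES key sizes): a 128-bit
+ * key uses 16 bytes, i.e. the first 4 of the 8 32-bit words in
+ * swap_crypt_key[], which is sized for an eventual 256-bit key.
+ * swap_crypt_null_iv (first byte 0x0a, the rest zero) is the fixed
+ * IV used by the DEBUG self-test below; per-page IVs are presumably
+ * derived elsewhere (see the encrypt_iv union in vm_page_encrypt()).
+ */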
+
+#if DEBUG
+boolean_t swap_crypt_ctx_tested = FALSE;
+unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
+unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
+unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
+#endif /* DEBUG */
+
+extern u_long random(void);
+
+/*
+ * Initialize the encryption context: key and key size.
+ */
+void swap_crypt_ctx_initialize(void); /* forward */
+void
+swap_crypt_ctx_initialize(void)
+{
+ unsigned int i;
+
+ /*
+ * No need for locking to protect swap_crypt_ctx_initialized
+ * because the first use of encryption will come from the
+ * pageout thread (we won't pagein before there's been a pageout)
+ * and there's only one pageout thread.
+ */
+ if (swap_crypt_ctx_initialized == FALSE) {
+ for (i = 0;
+ i < (sizeof (swap_crypt_key) /
+ sizeof (swap_crypt_key[0]));
+ i++) {
+ swap_crypt_key[i] = random();
+ }
+ aes_encrypt_key((const unsigned char *) swap_crypt_key,
+ SWAP_CRYPT_AES_KEY_SIZE,
+ &swap_crypt_ctx.encrypt);
+ aes_decrypt_key((const unsigned char *) swap_crypt_key,
+ SWAP_CRYPT_AES_KEY_SIZE,
+ &swap_crypt_ctx.decrypt);
+ swap_crypt_ctx_initialized = TRUE;
+ }
+
+#if DEBUG
+ /*
+ * Validate the encryption algorithms.
+ */
+ if (swap_crypt_ctx_tested == FALSE) {
+ /* initialize */
+ for (i = 0; i < 4096; i++) {
+ swap_crypt_test_page_ref[i] = (char) i;
+ }
+ /* encrypt */
+ aes_encrypt_cbc(swap_crypt_test_page_ref,
+ swap_crypt_null_iv,
+ PAGE_SIZE / AES_BLOCK_SIZE,
+ swap_crypt_test_page_encrypt,
+ &swap_crypt_ctx.encrypt);
+ /* decrypt */
+ aes_decrypt_cbc(swap_crypt_test_page_encrypt,
+ swap_crypt_null_iv,
+ PAGE_SIZE / AES_BLOCK_SIZE,
+ swap_crypt_test_page_decrypt,
+ &swap_crypt_ctx.decrypt);
+ /* compare result with original */
+ for (i = 0; i < 4096; i ++) {
+ if (swap_crypt_test_page_decrypt[i] !=
+ swap_crypt_test_page_ref[i]) {
+ panic("encryption test failed");
+ }
+ }
+
+ /* encrypt again */
+ aes_encrypt_cbc(swap_crypt_test_page_decrypt,
+ swap_crypt_null_iv,
+ PAGE_SIZE / AES_BLOCK_SIZE,
+ swap_crypt_test_page_decrypt,
+ &swap_crypt_ctx.encrypt);
+ /* decrypt in place */
+ aes_decrypt_cbc(swap_crypt_test_page_decrypt,
+ swap_crypt_null_iv,
+ PAGE_SIZE / AES_BLOCK_SIZE,
+ swap_crypt_test_page_decrypt,
+ &swap_crypt_ctx.decrypt);
+ for (i = 0; i < 4096; i ++) {
+ if (swap_crypt_test_page_decrypt[i] !=
+ swap_crypt_test_page_ref[i]) {
+ panic("in place encryption test failed");
+ }
+ }
+
+ swap_crypt_ctx_tested = TRUE;
+ }
+#endif /* DEBUG */
+}
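+
+ /*
+ * For the self-test above, the block count handed to
+ * aes_encrypt_cbc()/aes_decrypt_cbc() is PAGE_SIZE / AES_BLOCK_SIZE;
+ * with 4KB pages and AES's 16-byte blocks that is 256 CBC blocks per
+ * page. The second, in-place round trip matters because the swap
+ * path presumably encrypts and decrypts pages in place through a
+ * kernel mapping, so the CBC routines must tolerate the input and
+ * output buffers being the same.
+ */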
+
+/*
+ * ENCRYPTED SWAP:
+ * vm_page_encrypt:
+ * Encrypt the given page, for secure paging.
+ * The page might already be mapped at kernel virtual
+ * address "kernel_mapping_offset". Otherwise, we need
+ * to map it.
+ *
+ * Context:
+ * The page's object is locked, but this lock will be released
+ * and re-acquired.
+ * The page is busy and not accessible by users (not entered in any pmap).
+ */
+void
+vm_page_encrypt(
+ vm_page_t page,
+ vm_map_offset_t kernel_mapping_offset)
+{
+ kern_return_t kr;
+ vm_map_size_t kernel_mapping_size;
+ vm_offset_t kernel_vaddr;
+ union {
+ unsigned char aes_iv[AES_BLOCK_SIZE];
+ struct {
+ memory_object_t pager_object;
+ vm_object_offset_t paging_offset;
+ } vm;
+ } encrypt_iv;
+
+ if (! vm_pages_encrypted) {
+ vm_pages_encrypted = TRUE;
+ }