+/*
+ * task_backing_store_privileged:
+ *
+ * Grant the task backing-store privilege by setting the
+ * VM_BACKING_STORE_PRIV bit in task->priv_flags.  The flag is
+ * set under the task lock; there is no corresponding clear
+ * routine visible here, so the privilege appears one-way.
+ */
+void
+task_backing_store_privileged(
+ task_t task)
+{
+ task_lock(task);
+ task->priv_flags |= VM_BACKING_STORE_PRIV;
+ task_unlock(task);
+ return;
+}
+
+
+/*
+ * task_set_64bit:
+ *
+ * Switch a task between 32-bit and 64-bit address-space modes.
+ *
+ * is64bit == TRUE:  set the task's 64-bit address flag (no-op if
+ *                   already set).
+ * is64bit == FALSE: tear down every mapping above the 32-bit
+ *                   addressable range before clearing the flag,
+ *                   since that memory becomes unreachable (no-op
+ *                   if the flag is already clear).
+ *
+ * NOTE(review): unlike task_backing_store_privileged above, the
+ * flag here is toggled without task_lock held — presumably the
+ * caller serializes mode changes; confirm against call sites.
+ */
+void
+task_set_64bit(
+ task_t task,
+ boolean_t is64bit)
+{
+#ifdef __i386__
+ thread_t thread;
+#endif /* __i386__ */
+ int vm_flags = 0;
+
+ if (is64bit) {
+ /* Early out if the task is already 64-bit. */
+ if (task_has_64BitAddr(task))
+ return;
+
+ task_set_64BitAddr(task);
+ } else {
+ /* Early out if the task is already 32-bit. */
+ if ( !task_has_64BitAddr(task))
+ return;
+
+ /*
+ * Deallocate all memory previously allocated
+ * above the 32-bit address space, since it won't
+ * be accessible anymore.
+ */
+ /* remove regular VM map entries & pmap mappings */
+ (void) vm_map_remove(task->map,
+ (vm_map_offset_t) VM_MAX_ADDRESS,
+ MACH_VM_MAX_ADDRESS,
+ 0);
+#ifdef __ppc__
+ /* LP64todo - make this clean */
+ /*
+ * PPC51: ppc64 is limited to 51-bit addresses.
+ * Memory mapped above that limit is handled specially
+ * at the pmap level, so let pmap clean the commpage mapping
+ * explicitly...
+ */
+ pmap_unmap_sharedpage(task->map->pmap); /* Unmap commpage */
+ /* ... and avoid regular pmap cleanup */
+ vm_flags |= VM_MAP_REMOVE_NO_PMAP_CLEANUP;
+#endif /* __ppc__ */
+ /* remove the higher VM mappings */
+ /*
+ * 0xFFFFFFFFFFFFF000ULL is the start of the last 4K page
+ * of the 64-bit space — i.e. remove up to the very top of
+ * the address space (assumes 4K page alignment — TODO confirm).
+ */
+ (void) vm_map_remove(task->map,
+ MACH_VM_MAX_ADDRESS,
+ 0xFFFFFFFFFFFFF000ULL,
+ vm_flags);
+ task_clear_64BitAddr(task);
+ }
+ /* FIXME: On x86, the thread save state flavor can diverge from the
+ * task's 64-bit feature flag due to the 32-bit/64-bit register save
+ * state dichotomy. Since we can be pre-empted in this interval,
+ * certain routines may observe the thread as being in an inconsistent
+ * state with respect to its task's 64-bitness.
+ */
+#ifdef __i386__
+ /*
+ * Resync each thread's machine save-state flavor with the task's
+ * new address-space mode (see FIXME above for the window where
+ * they can disagree).
+ */
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ machine_thread_switch_addrmode(thread);
+ }
+#endif /* __i386__ */
+}
+