apple/xnu.git (xnu-792.18.15) - osfmk/i386/user_ldt.c
diff --git a/osfmk/i386/user_ldt.c b/osfmk/i386/user_ldt.c
index 5074e296baf8e34a90f10c3ac20e0ceb2187b262..afb9795a3f4cec50088f57dbed5d96e45c1c63cb 100644
--- a/osfmk/i386/user_ldt.c
+++ b/osfmk/i386/user_ldt.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -56,7 +59,7 @@
 
 /*
  * User LDT management.
- * Each thread in a task may have its own LDT.
+ * Each task may have its own LDT.
  */
 
 #include <kern/kalloc.h>
 #include <i386/seg.h>
 #include <i386/thread.h>
 #include <i386/user_ldt.h>
+#include <i386/mp_desc.h>
+#include <i386/proc_reg.h>
+#include <i386/machdep_call.h>
+#include <i386/mp.h>
+#include <i386/machine_routines.h>
 
-char   acc_type[8][3] = {
-    /* code    stack   data */
-    {  0,      0,      1       },      /* data */
-    {  0,      1,      1       },      /* data, writable */
-    {  0,      0,      1       },      /* data, expand-down */
-    {  0,      1,      1       },      /* data, writable, expand-down */
-    {  1,      0,      0       },      /* code */
-    {  1,      0,      1       },      /* code, readable */
-    {  1,      0,      0       },      /* code, conforming */
-    {  1,      0,      1       },      /* code, readable, conforming */
-};
-
-extern struct fake_descriptor ldt[];   /* for system call gate */
-
-#if 0
-/* Forward */
-
-extern boolean_t       selector_check(
-                               thread_t                thread,
-                               int                     sel,
-                               int                     type);
-
-boolean_t
-selector_check(
-       thread_t                thread,
-       int                     sel,
-       int                     type)
-{
-       struct user_ldt *ldt;
-       int     access;
-
-       ldt = thread->top_act->mact.pcb->ims.ldt;
-       if (ldt == 0) {
-           switch (type) {
-               case S_CODE:
-                   return sel == USER_CS;
-               case S_STACK:
-                   return sel == USER_DS;
-               case S_DATA:
-                   return sel == 0 ||
-                          sel == USER_CS ||
-                          sel == USER_DS;
-           }
-       }
+#include <sys/errno.h>
 
-       if (type != S_DATA && sel == 0)
-           return FALSE;
-       if ((sel & (SEL_LDTS|SEL_PL)) != (SEL_LDTS|SEL_PL_U)
-         || sel > ldt->desc.limit_low)
-               return FALSE;
-
-       access = ldt->ldt[sel_idx(sel)].access;
-       
-       if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER))
-               != (ACC_P|ACC_PL_U|ACC_TYPE_USER))
-           return FALSE;
-               /* present, pl == pl.user, not system */
-
-       return acc_type[(access & 0xe)>>1][type];
-}
+static void user_ldt_set_action(void *);
 
 /*
  * Add the descriptors to the LDT, starting with
  * the descriptor for 'first_selector'.
  */
 
-kern_return_t
+int
 i386_set_ldt(
-       thread_act_t            thr_act,
-       int                     first_selector,
-       descriptor_list_t       desc_list,
-       mach_msg_type_number_t  count)
+       int                     *retval,
+       uint32_t                start_sel,
+       uint32_t                descs,  /* in */
+       uint32_t                num_sels)
 {
-       user_ldt_t      new_ldt, old_ldt, temp;
+       user_ldt_t      new_ldt, old_ldt;
        struct real_descriptor *dp;
-       int             i;
-       int             min_selector = 0;
-       pcb_t           pcb;
-       vm_size_t       ldt_size_needed;
-       int             first_desc = sel_idx(first_selector);
-       vm_map_copy_t   old_copy_object;
-       thread_t                thread;
-
-       if (first_desc < min_selector || first_desc > 8191)
-           return KERN_INVALID_ARGUMENT;
-       if (first_desc + count >= 8192)
-           return KERN_INVALID_ARGUMENT;
-       if (thr_act == THR_ACT_NULL)
-           return KERN_INVALID_ARGUMENT;
-       if ((thread = act_lock_thread(thr_act)) == THREAD_NULL) {
-               act_unlock_thread(thr_act);
-           return KERN_INVALID_ARGUMENT;
-       }
-       if (thread == current_thread())
-               min_selector = LDTSZ;
-       act_unlock_thread(thr_act);
+       unsigned int    i;
+       unsigned int    min_selector = LDTSZ_MIN;       /* do not allow the system selectors to be changed */
+       task_t          task = current_task();
+       unsigned int    ldt_count;
+       kern_return_t err;
 
-       /*
-        * We must copy out desc_list to the kernel map, and wire
-        * it down (we touch it while the PCB is locked).
-        *
-        * We make a copy of the copyin object, and clear
-        * out the old one, so that the MIG stub will have a
-        * a empty (but valid) copyin object to discard.
-        */
-       {
-           kern_return_t       kr;
-           vm_offset_t         dst_addr;
-
-           old_copy_object = (vm_map_copy_t) desc_list;
-
-           kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
-                               vm_map_copy_copy(old_copy_object));
-           if (kr != KERN_SUCCESS)
-               return kr;
-
-           (void) vm_map_wire(ipc_kernel_map,
-                       trunc_page(dst_addr),
-                       round_page(dst_addr + 
-                               count * sizeof(struct real_descriptor)),
-                       VM_PROT_READ|VM_PROT_WRITE, FALSE);
-           desc_list = (descriptor_list_t) dst_addr;
-       }
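+       /*
+        * start_sel == LDT_AUTO_ALLOC asks the kernel to pick a free range
+        * of descriptors; start_sel == 0 with num_sels == 0 removes the
+        * task's LDT entirely.
+        */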
+       if (start_sel != LDT_AUTO_ALLOC
+           && (start_sel != 0 || num_sels != 0)
+           && (start_sel < min_selector || start_sel >= LDTSZ))
+           return EINVAL;
+       if (start_sel != LDT_AUTO_ALLOC
+           && start_sel + num_sels > LDTSZ)
+           return EINVAL;
 
-       for (i = 0, dp = (struct real_descriptor *) desc_list;
-            i < count;
-            i++, dp++)
-       {
-           switch (dp->access & ~ACC_A) {
-               case 0:
-               case ACC_P:
-                   /* valid empty descriptor */
-                   break;
-               case ACC_P | ACC_CALL_GATE:
-                   /* Mach kernel call */
-                   *dp = *(struct real_descriptor *)
-                               &ldt[sel_idx(USER_SCALL)];
-                   break;
-               case ACC_P | ACC_PL_U | ACC_DATA:
-               case ACC_P | ACC_PL_U | ACC_DATA_W:
-               case ACC_P | ACC_PL_U | ACC_DATA_E:
-               case ACC_P | ACC_PL_U | ACC_DATA_EW:
-               case ACC_P | ACC_PL_U | ACC_CODE:
-               case ACC_P | ACC_PL_U | ACC_CODE_R:
-               case ACC_P | ACC_PL_U | ACC_CODE_C:
-               case ACC_P | ACC_PL_U | ACC_CODE_CR:
-               case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
-               case ACC_P | ACC_PL_U | ACC_CALL_GATE:
-                   break;
-               default:
-                   (void) vm_map_remove(ipc_kernel_map, 
-                                        (vm_offset_t) desc_list,
-                                        count * sizeof(struct real_descriptor),
-                                        VM_MAP_REMOVE_KUNWIRE);
-                   return KERN_INVALID_ARGUMENT;
+       task_lock(task);
+       
+       old_ldt = task->i386_ldt;
+
+       if (start_sel == LDT_AUTO_ALLOC) {
+           if (old_ldt) {
+               unsigned int null_count;
+               struct real_descriptor null_ldt;
+               
+               bzero(&null_ldt, sizeof(null_ldt));
+
+               /*
+                * Look for null selectors among the already-allocated
+                * entries.
+                */
+               null_count = 0;
+               i = 0;
+               while (i < old_ldt->count)
+               {
+                   if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
+                       null_count++;
+                       if (null_count == num_sels)
+                           break;  /* break out of while loop */
+                   } else {
+                       null_count = 0;
+                   }
+               }
+
+               /*
+                * If we broke out of the while loop, i points to the selector
+                * after num_sels null selectors.  Otherwise it points to the end
+                * of the old LDTs, and null_count is the number of null selectors
+                * at the end. 
+                *
+                * Either way, there are null_count null selectors just prior to
+                * the i-indexed selector, and either null_count >= num_sels,
+                * or we're at the end, so we can extend.
+                */
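+               /*
+                * Illustrative example (not in the original source): if the
+                * old LDT spans selectors 64..68 with entries {D, 0, 0, D, D}
+                * and num_sels == 2, the scan above stops with i == 3 and
+                * null_count == 2, so start_sel becomes 64 + 3 - 2 == 65,
+                * i.e. the first of the two existing null slots is reused.
+                */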
+               start_sel = old_ldt->start + i - null_count;
+           } else {
+               start_sel = LDTSZ_MIN;
+           }
+               
+           if (start_sel + num_sels > LDTSZ) {
+               task_unlock(task);
+               return ENOMEM;
            }
        }
-       ldt_size_needed = sizeof(struct real_descriptor)
-                       * (first_desc + count);
-
-       pcb = thr_act->mact.pcb;
-       new_ldt = 0;
-    Retry:
-       simple_lock(&pcb->lock);
-       old_ldt = pcb->ims.ldt;
-       if (old_ldt == 0 ||
-           old_ldt->desc.limit_low + 1 < ldt_size_needed)
-       {
+
+       if (start_sel == 0 && num_sels == 0) {
+           new_ldt = NULL;
+       } else {
            /*
-            * No old LDT, or not big enough
+            * Allocate new LDT
             */
-           if (new_ldt == 0) {
-               simple_unlock(&pcb->lock);
-
-               new_ldt = (user_ldt_t) kalloc(ldt_size_needed
-                                             + sizeof(struct real_descriptor));
-               new_ldt->desc.limit_low   = ldt_size_needed - 1;
-               new_ldt->desc.limit_high  = 0;
-               new_ldt->desc.base_low    = 
-                               ((vm_offset_t)&new_ldt->ldt[0]) & 0xffff;
-               new_ldt->desc.base_med    = 
-                               (((vm_offset_t)&new_ldt->ldt[0]) >> 16)
-                                                & 0xff;
-               new_ldt->desc.base_high   = 
-                               ((vm_offset_t)&new_ldt->ldt[0]) >> 24;
-               new_ldt->desc.access      = ACC_P | ACC_LDT;
-               new_ldt->desc.granularity = 0;
-
-               goto Retry;
+
+           unsigned int    begin_sel = start_sel;
+           unsigned int    end_sel = begin_sel + num_sels;
+           
+           if (old_ldt != NULL) {
+               if (old_ldt->start < begin_sel)
+                   begin_sel = old_ldt->start;
+               if (old_ldt->start + old_ldt->count > end_sel)
+                   end_sel = old_ldt->start + old_ldt->count;
+           }
+
+           ldt_count = end_sel - begin_sel;
+
+           new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor)));
+           if (new_ldt == NULL) {
+               task_unlock(task);
+               return ENOMEM;
            }
 
+           new_ldt->start = begin_sel;
+           new_ldt->count = ldt_count;
+
            /*
             * Have new LDT.  If there was an old ldt, copy descriptors
-            * from old to new.  Otherwise copy the default ldt.
+            * from old to new.
             */
            if (old_ldt) {
-               bcopy((char *)&old_ldt->ldt[0],
-                     (char *)&new_ldt->ldt[0],
-                     old_ldt->desc.limit_low + 1);
+               bcopy(&old_ldt->ldt[0],
+                     &new_ldt->ldt[old_ldt->start - begin_sel],
+                     old_ldt->count * sizeof(struct real_descriptor));
+
+               /*
+                * If the old and new LDTs are non-overlapping, fill the 
+                * center in with null selectors.
+                */
+                                
+               if (old_ldt->start + old_ldt->count < start_sel)
+                   bzero(&new_ldt->ldt[old_ldt->count],
+                         (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor));
+               else if (old_ldt->start > start_sel + num_sels)
+                   bzero(&new_ldt->ldt[num_sels],
+                         (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor));
            }
-           else if (thr_act == current_act()) {
-               struct real_descriptor template = {0, 0, 0, ACC_P, 0, 0 ,0};
-
-               for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) {
-                   if (i < LDTSZ)
-                       *dp = *(struct real_descriptor *) &ldt[i];
-                   else
-                       *dp = template;
+
+           /*
+            * Install new descriptors.
+            */
+           if (descs != 0) {
+               err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
+                            num_sels * sizeof(struct real_descriptor));
+               if (err != 0)
+               {
+                   task_unlock(task);
+                   user_ldt_free(new_ldt);
+                   return err;
                }
+           } else {
+               bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor));
            }
 
-           temp = old_ldt;
-           old_ldt = new_ldt;  /* use new LDT from now on */
-           new_ldt = temp;     /* discard old LDT */
-
-           pcb->ims.ldt = old_ldt;     /* new LDT for thread */
+           /*
+            * Validate descriptors.
+            * Only allow descriptors with user privileges.
+            */
+           for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel];
+                i < num_sels;
+                i++, dp++)
+           {
+               switch (dp->access & ~ACC_A) {
+                   case 0:
+                   case ACC_P:
+                       /* valid empty descriptor */
+                       break;
+                   case ACC_P | ACC_PL_U | ACC_DATA:
+                   case ACC_P | ACC_PL_U | ACC_DATA_W:
+                   case ACC_P | ACC_PL_U | ACC_DATA_E:
+                   case ACC_P | ACC_PL_U | ACC_DATA_EW:
+                   case ACC_P | ACC_PL_U | ACC_CODE:
+                   case ACC_P | ACC_PL_U | ACC_CODE_R:
+                   case ACC_P | ACC_PL_U | ACC_CODE_C:
+                   case ACC_P | ACC_PL_U | ACC_CODE_CR:
+                   case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
+                   case ACC_P | ACC_PL_U | ACC_CALL_GATE:
+                       break;
+                   default:
+                       task_unlock(task);
+                       user_ldt_free(new_ldt);
+                       return EACCES;
+               }
+           }
        }
 
+       task->i386_ldt = new_ldt; /* new LDT for task */
+
        /*
-        * Install new descriptors.
+        * Switch to new LDT.  We need to do this on all CPUs, since
+        * another thread in this same task may be currently running,
+        * and we need to make sure the new LDT is in place
+        * throughout the task before returning to the user.
         */
-       bcopy((char *)desc_list,
-             (char *)&old_ldt->ldt[first_desc],
-             count * sizeof(struct real_descriptor));
-
-       simple_unlock(&pcb->lock);
+       mp_rendezvous_no_intrs(user_ldt_set_action, task);
 
-       if (new_ldt)
-           kfree((vm_offset_t)new_ldt,
-                 new_ldt->desc.limit_low+1+sizeof(struct real_descriptor));
+       task_unlock(task);
 
-       /*
-        * Free the descriptor list.
+       /* free old LDT.  We can't do this until after we've
+        * rendezvoused with all CPUs, in case another thread
+        * in this task was in the process of context switching.
         */
-       (void) vm_map_remove(ipc_kernel_map, (vm_offset_t) desc_list,
-                       count * sizeof(struct real_descriptor),
-                       VM_MAP_REMOVE_KUNWIRE);
-       return KERN_SUCCESS;
+       if (old_ldt)
+           user_ldt_free(old_ldt);
+
+       *retval = start_sel;
+
+       return 0;
 }
 
-kern_return_t
+int
 i386_get_ldt(
-       thread_act_t            thr_act,
-       int                     first_selector,
-       int                     selector_count, /* number wanted */
-       descriptor_list_t       *desc_list,     /* in/out */
-       mach_msg_type_number_t  *count)         /* in/out */
+       int                     *retval,
+       uint32_t                start_sel,
+       uint32_t                descs,  /* out */
+       uint32_t                num_sels)
 {
-       struct user_ldt *user_ldt;
-       pcb_t           pcb = thr_act->mact.pcb;
-       int             first_desc = sel_idx(first_selector);
+       user_ldt_t      user_ldt;
+       task_t          task = current_task();
        unsigned int    ldt_count;
-       vm_size_t       ldt_size;
-       vm_size_t       size, size_needed;
-       vm_offset_t     addr;
-       thread_t                thread;
-
-       if (thr_act == THR_ACT_NULL || (thread = thr_act->thread)==THREAD_NULL)
-           return KERN_INVALID_ARGUMENT;
-
-       if (first_desc < 0 || first_desc > 8191)
-           return KERN_INVALID_ARGUMENT;
-       if (first_desc + selector_count >= 8192)
-           return KERN_INVALID_ARGUMENT;
-
-       addr = 0;
-       size = 0;
-
-       for (;;) {
-           simple_lock(&pcb->lock);
-           user_ldt = pcb->ims.ldt;
-           if (user_ldt == 0) {
-               simple_unlock(&pcb->lock);
-               if (addr)
-                   kmem_free(ipc_kernel_map, addr, size);
-               *count = 0;
-               return KERN_SUCCESS;
-           }
+       kern_return_t   err;
 
-           /*
-            * Find how many descriptors we should return.
-            */
-           ldt_count = (user_ldt->desc.limit_low + 1) /
-                       sizeof (struct real_descriptor);
-           ldt_count -= first_desc;
-           if (ldt_count > selector_count)
-               ldt_count = selector_count;
+       if (start_sel >= 8192)
+           return EINVAL;
+       if (start_sel + num_sels > 8192)
+           return EINVAL;
+       if (descs == 0)
+           return EINVAL;
 
-           ldt_size = ldt_count * sizeof(struct real_descriptor);
+       task_lock(task);
 
-           /*
-            * Do we have the memory we need?
-            */
-           if (ldt_count <= *count)
-               break;          /* fits in-line */
+       user_ldt = task->i386_ldt;
+       err = 0;
 
-           size_needed = round_page(ldt_size);
-           if (size_needed <= size)
-               break;
+       /*
+        * copy out the descriptors
+        */
 
-           /*
-            * Unlock the pcb and allocate more memory
-            */
-           simple_unlock(&pcb->lock);
+       if (user_ldt != 0)
+           ldt_count = user_ldt->start + user_ldt->count;
+       else
+           ldt_count = LDTSZ_MIN;
 
-           if (size != 0)
-               kmem_free(ipc_kernel_map, addr, size);
 
-           size = size_needed;
+       if (start_sel < ldt_count)
+       {
+           unsigned int copy_sels = num_sels;
 
-           if (kmem_alloc(ipc_kernel_map, &addr, size)
-                       != KERN_SUCCESS)
-               return KERN_RESOURCE_SHORTAGE;
+           if (start_sel + num_sels > ldt_count)
+               copy_sels = ldt_count - start_sel;
+
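+           /*
+            * The descriptors are copied out of the live per-CPU LDT
+            * (current_ldt()) rather than the task's user_ldt structure,
+            * so the system-defined selectors below LDTSZ_MIN can be read
+            * back as well.
+            */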
+           err = copyout((char *)(current_ldt() + start_sel),
+                         descs, copy_sels * sizeof(struct real_descriptor));
        }
 
-       /*
-        * copy out the descriptors
-        */
-       bcopy((char *)&user_ldt->ldt[first_desc],
-             (char *)addr,
-             ldt_size);
-       *count = ldt_count;
-       simple_unlock(&pcb->lock);
+       task_unlock(task);
 
-       if (addr) {
-           vm_size_t           size_used, size_left;
-           vm_map_copy_t       memory;
+       *retval = ldt_count;
 
-           /*
-            * Free any unused memory beyond the end of the last page used
-            */
-           size_used = round_page(ldt_size);
-           if (size_used != size)
-               kmem_free(ipc_kernel_map,
-                       addr + size_used, size - size_used);
+       return err;
+}
 
-           /*
-            * Zero the remainder of the page being returned.
-            */
-           size_left = size_used - ldt_size;
-           if (size_left > 0)
-               bzero((char *)addr + ldt_size, size_left);
+void
+user_ldt_free(
+       user_ldt_t      user_ldt)
+{
+       kfree(user_ldt, sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor)));
+}
 
-           /*
-            * Unwire the memory and make it into copyin form.
-            */
-           (void) vm_map_unwire(ipc_kernel_map, trunc_page(addr),
-                                round_page(addr + size_used), FALSE);
-           (void) vm_map_copyin(ipc_kernel_map, addr, size_used,
-                               TRUE, &memory);
-           *desc_list = (descriptor_list_t) memory;
+user_ldt_t
+user_ldt_copy(
+       user_ldt_t      user_ldt)
+{
+       if (user_ldt != NULL) {
+           size_t      size = sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor));
+           user_ldt_t  new_ldt = (user_ldt_t)kalloc(size);
+           if (new_ldt != NULL)
+               bcopy(user_ldt, new_ldt, size);
+           return new_ldt;
        }
+       
+       return 0;
+}
 
-       return KERN_SUCCESS;
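+/*
+ * mp_rendezvous action routine: reload the LDT on this CPU if the CPU is
+ * currently running a thread of the task whose LDT was just changed.
+ */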
+void
+user_ldt_set_action(
+       void *arg)
+{
+       task_t          arg_task = (task_t)arg;
+
+       if (arg_task == current_task()) {
+           user_ldt_set(current_thread());
+       }
 }
 
-#endif 
+/*
+ * Set the LDT for the given thread on the current CPU.  Should be invoked
+ * with interrupts disabled.
+ */
 void
-user_ldt_free(
-       user_ldt_t      user_ldt)
+user_ldt_set(
+       thread_t thread)
 {
-       kfree((vm_offset_t)user_ldt,
-               user_ldt->desc.limit_low+1+sizeof(struct real_descriptor));
+        task_t         task = thread->task;
+       user_ldt_t      user_ldt;
+
+       user_ldt = task->i386_ldt;
+
+       if (user_ldt != 0) {
+           struct real_descriptor *ldtp = (struct real_descriptor *)current_ldt();
+
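+           /*
+            * Clear any stale descriptors between the end of the system
+            * selectors and the start of this task's LDT, then copy the
+            * task's descriptors into the per-CPU LDT and extend the LDT
+            * limit in the GDT to cover them.
+            */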
+           if (user_ldt->start > LDTSZ_MIN) {
+               bzero(&ldtp[LDTSZ_MIN],
+                     sizeof(struct real_descriptor) * (user_ldt->start - LDTSZ_MIN));
+           }
+           
+           bcopy(user_ldt->ldt, &ldtp[user_ldt->start],
+                 sizeof(struct real_descriptor) * (user_ldt->count));
+
+           gdt_desc_p(USER_LDT)->limit_low = (sizeof(struct real_descriptor) * (user_ldt->start + user_ldt->count)) - 1;
+
+           ml_cpu_set_ldt(USER_LDT);
+       } else {
+           ml_cpu_set_ldt(KERNEL_LDT);
+       }
 }
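
A minimal user-space usage sketch of the new interface follows; it is not part of this diff. It assumes the user-level wrappers i386_set_ldt()/i386_get_ldt() and the LDT_AUTO_ALLOC constant exported through <i386/user_ldt.h>, plus the union ldt_entry type from <architecture/i386/table.h>; check the SDK headers for the exact declarations. The program asks the kernel to auto-allocate one zero-filled LDT slot (a NULL descriptor pointer takes the bzero path in i386_set_ldt above), reads it back, then removes the task's LDT again.

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <architecture/i386/table.h>    /* union ldt_entry (assumed location) */
    #include <i386/user_ldt.h>              /* i386_set_ldt, i386_get_ldt, LDT_AUTO_ALLOC */

    int
    main(void)
    {
            union ldt_entry entry;
            int sel, count;

            /* Auto-allocate one LDT slot; a NULL descriptor pointer leaves it
             * as a zero-filled (valid empty) descriptor. */
            sel = i386_set_ldt(LDT_AUTO_ALLOC, NULL, 1);
            if (sel < 0) {
                    fprintf(stderr, "i386_set_ldt: %s\n", strerror(errno));
                    return 1;
            }
            printf("allocated LDT index %d (selector 0x%x)\n",
                   sel, (sel << 3) | 0x7);  /* TI = 1 (LDT), RPL = 3 */

            /* Read the entry back; the return value is the number of
             * valid LDT entries. */
            count = i386_get_ldt(sel, &entry, 1);
            if (count < 0) {
                    fprintf(stderr, "i386_get_ldt: %s\n", strerror(errno));
                    return 1;
            }
            printf("LDT now holds %d entries\n", count);

            /* start_sel == 0 and num_sels == 0 frees the task's LDT. */
            (void)i386_set_ldt(0, NULL, 0);
            return 0;
    }

A selector referencing such an entry is formed as (index << 3) | 4 | 3, i.e. table indicator set to LDT and requestor privilege level 3, which is what the 0x7 above encodes.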