/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 * @OSF_FREE_COPYRIGHT@
 *
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 *	Author: Avadis Tevanian, Jr.
 *
 *	This file contains the structure definitions for threads.
 *
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#ifndef _KERN_THREAD_H_
#define _KERN_THREAD_H_

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <mach/thread_info.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>

#include <kern/kern_types.h>
#include <vm/vm_kern.h>

#include <sys/cdefs.h>
#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>
#include <mach_ldebug.h>

#include <ipc/ipc_types.h>

#include <mach/port.h>
#include <kern/cpu_number.h>
#include <kern/smp.h>
#include <kern/queue.h>
#include <kern/timer.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <mach/sfi_class.h>
#include <kern/thread_call.h>
#include <kern/thread_group.h>
#include <kern/timer_call.h>
#include <kern/task.h>
#include <kern/exception.h>
#include <kern/affinity.h>
#include <kern/debug.h>
#include <kern/block_hint.h>

#include <kern/waitq.h>
#include <san/kasan.h>

#include <ipc/ipc_kmsg.h>

#include <machine/cpu_data.h>
#include <machine/thread.h>

#if MONOTONIC
#include <stdatomic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */
#if CONFIG_EMBEDDED
/* Taskwatch related. TODO: find this a better home */
typedef struct task_watcher task_watch_t;
#endif /* CONFIG_EMBEDDED */

struct thread {
#if MACH_ASSERT
#define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
        /* Ensure nothing uses &thread as a queue entry */
        uint64_t                thread_magic;
#endif /* MACH_ASSERT */
        /*
         *      NOTE:   The runq field in the thread structure has an unusual
         *      locking protocol.  If its value is PROCESSOR_NULL, then it is
         *      locked by the thread_lock, but if its value is something else
         *      then it is locked by the associated run queue lock.  It is
         *      set to PROCESSOR_NULL without holding the thread lock, but the
         *      transition from PROCESSOR_NULL to non-null must be done
         *      under the thread lock and the run queue lock.
         *
         *      New waitq APIs allow the 'links' and 'runq' fields to be
         *      anywhere in the thread structure.
         */
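        /*
         * Illustrative sketch of the protocol above (not code from this file):
         * the remover, holding the relevant run queue lock, may clear the field,
         *
         *      thread->runq = PROCESSOR_NULL;
         *
         * without taking the thread lock, whereas the PROCESSOR_NULL -> non-null
         * transition (enqueueing) is performed only with both the thread lock
         * and the run queue lock held.
         */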
        queue_chain_t           runq_links;     /* run queue links */
        queue_chain_t           wait_links;     /* wait queue links */

        processor_t             runq;           /* run queue assignment */

        event64_t               wait_event;     /* wait queue event */
        struct waitq            *waitq;         /* wait queue this thread is enqueued on */
        /* Data updated during assert_wait/thread_wakeup */
        decl_simple_lock_data(,sched_lock)      /* scheduling lock (thread_lock()) */
        decl_simple_lock_data(,wake_lock)       /* for thread stop / wait (wake_lock()) */

        integer_t               options;        /* options set by thread itself */
#define TH_OPT_INTMASK          0x0003          /* interrupt / abort level */
#define TH_OPT_VMPRIV           0x0004          /* may allocate reserved memory */
#define TH_OPT_DTRACE           0x0008          /* executing under dtrace_probe */
#define TH_OPT_SYSTEM_CRITICAL  0x0010          /* Thread must always be allowed to run - even under heavy load */
#define TH_OPT_PROC_CPULIMIT    0x0020          /* Thread has a task-wide CPU limit applied to it */
#define TH_OPT_PRVT_CPULIMIT    0x0040          /* Thread has a thread-private CPU limit applied to it */
#define TH_OPT_IDLE_THREAD      0x0080          /* Thread is a per-processor idle thread */
#define TH_OPT_GLOBAL_FORCED_IDLE       0x0100  /* Thread performs forced idle for thermal control */
#define TH_OPT_SCHED_VM_GROUP   0x0200          /* Thread belongs to special scheduler VM group */
#define TH_OPT_HONOR_QLIMIT     0x0400          /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
#define TH_OPT_SEND_IMPORTANCE  0x0800          /* Thread will allow importance donation from kernel rpc */
#define TH_OPT_ZONE_GC          0x1000          /* zone_gc() called on this thread */
        boolean_t               wake_active;    /* wake event on stop */
        int                     at_safe_point;  /* thread_abort_safely allowed */
        ast_t                   reason;         /* why we blocked */
        uint32_t                quantum_remaining;
        wait_result_t           wait_result;    /* outcome of wait -
                                                 * may be examined by this thread */
        thread_continue_t       continuation;   /* continue here next dispatch */
        void                    *parameter;     /* continuation parameter */

        /* Data updated/used in thread_invoke */
        vm_offset_t             kernel_stack;   /* current kernel stack */
        vm_offset_t             reserved_stack; /* reserved kernel stack */

        struct kasan_thread_data kasan_data;
        /*
         *      Thread states [bits or'ed]
         */
#define TH_WAIT                 0x01            /* queued for waiting */
#define TH_SUSP                 0x02            /* stopped or requested to stop */
#define TH_RUN                  0x04            /* running or on runq */
#define TH_UNINT                0x08            /* waiting uninterruptibly */
#define TH_TERMINATE            0x10            /* halted at termination */
#define TH_TERMINATE2           0x20            /* added to termination queue */

#define TH_IDLE                 0x80            /* idling processor */
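/*
 * Illustrative combinations (an inference from the flag comments above, not
 * an exhaustive state chart): a runnable or executing thread carries TH_RUN,
 * an interruptible wait carries TH_WAIT, and an uninterruptible wait carries
 * TH_WAIT | TH_UNINT.
 */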
        /* Scheduling information */
        sched_mode_t            sched_mode;     /* scheduling mode */
        sched_mode_t            saved_mode;     /* saved mode during forced mode demotion */

        /* This thread's contribution to global sched counters */
        sched_bucket_t          th_sched_bucket;

        sfi_class_id_t          sfi_class;      /* SFI class (XXX Updated on CSW/QE/AST) */
        sfi_class_id_t          sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */

        uint32_t                sched_flags;    /* current flag bits */
/* TH_SFLAG_FAIRSHARE_TRIPPED (unused)  0x0001 */
#define TH_SFLAG_FAILSAFE               0x0002          /* fail-safe has tripped */
#define TH_SFLAG_THROTTLED              0x0004          /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
#define TH_SFLAG_DEMOTED_MASK      (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE)     /* saved_mode contains previous sched_mode */

#define TH_SFLAG_PROMOTED               0x0008          /* sched pri has been promoted */
#define TH_SFLAG_ABORT                  0x0010          /* abort interruptible waits */
#define TH_SFLAG_ABORTSAFELY            0x0020          /* ... but only those at safe point */
#define TH_SFLAG_ABORTED_MASK           (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
#define TH_SFLAG_DEPRESS                0x0040          /* normal depress yield */
#define TH_SFLAG_POLLDEPRESS            0x0080          /* polled depress yield */
#define TH_SFLAG_DEPRESSED_MASK         (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
/* unused TH_SFLAG_PRI_UPDATE           0x0100 */
#define TH_SFLAG_EAGERPREEMPT           0x0200          /* Any preemption of this thread should be treated as if AST_URGENT applied */
#define TH_SFLAG_RW_PROMOTED            0x0400          /* sched pri has been promoted due to blocking with RW lock held */
/* unused TH_SFLAG_THROTTLE_DEMOTED     0x0800 */
#define TH_SFLAG_WAITQ_PROMOTED         0x1000          /* sched pri promoted from waitq wakeup (generally for IPC receive) */

#define TH_SFLAG_EXEC_PROMOTED          0x8000          /* sched pri has been promoted since thread is in an exec */
#define TH_SFLAG_PROMOTED_MASK          (TH_SFLAG_PROMOTED | TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED)

#define TH_SFLAG_RW_PROMOTED_BIT        (10)    /* 0x400 */
        int16_t                 sched_pri;      /* scheduled (current) priority */
        int16_t                 base_pri;       /* base priority */
        int16_t                 max_priority;   /* copy of max base priority */
        int16_t                 task_priority;  /* copy of task base priority */
#if defined(CONFIG_SCHED_GRRR)
        uint16_t                grrr_deficit;   /* fixed point (1/1000th quantum) fractional deficit */
#endif /* defined(CONFIG_SCHED_GRRR) */
        int16_t                 promotions;     /* level of promotion */
        int16_t                 pending_promoter_index;
        _Atomic uint32_t        ref_count;      /* number of references to me */
        void                    *pending_promoter[2];

        uint32_t                rwlock_count;   /* Number of lck_rw_t locks held by thread */

        integer_t               importance;     /* task-relative importance */
        uint32_t                was_promoted_on_wakeup;

        /* Priority depression expiration */
        integer_t               depress_timer_active;
        timer_call_data_t       depress_timer;

                                                /* real-time parameters */
        struct {                                /* see mach/thread_policy.h */
                uint32_t                computation;
                boolean_t               preemptible;

        uint64_t                last_run_time;          /* time when thread was switched away from */
        uint64_t                last_made_runnable_time;        /* time when thread was unblocked or preempted */
        uint64_t                last_basepri_change_time;       /* time when thread was last changed in basepri while runnable */
        uint64_t                same_pri_latency;
#define THREAD_NOT_RUNNABLE (~0ULL)
#if defined(CONFIG_SCHED_MULTIQ)
        sched_group_t           sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */
        /* Data used during setrun/dispatch */
        timer_data_t            system_timer;           /* system mode timer */
        processor_t             bound_processor;        /* bound to a processor? */
        processor_t             last_processor;         /* processor last dispatched on */
        processor_t             chosen_processor;       /* Where we want to run this thread */

        /* Fail-safe computation since last unblock or qualifying yield */
        uint64_t                computation_metered;
        uint64_t                computation_epoch;
        uint64_t                safe_release;   /* when to release fail-safe */

        /* Call out from scheduler */
#if defined(CONFIG_SCHED_PROTO)
        uint32_t                runqueue_generation;    /* last time runqueue was drained */
#endif /* defined(CONFIG_SCHED_PROTO) */
        /* Statistics and timesharing calculations */
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
        natural_t               sched_stamp;    /* last scheduler tick */
        natural_t               sched_usage;    /* timesharing cpu usage [sched] */
        natural_t               pri_shift;      /* usage -> priority from pset */
        natural_t               cpu_usage;      /* instrumented cpu usage [%cpu] */
        natural_t               cpu_delta;      /* accumulated cpu_usage delta */
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

        uint32_t                c_switch;       /* total context switches */
        uint32_t                p_switch;       /* total processor switches */
        uint32_t                ps_switch;      /* total pset switches */

        integer_t               mutex_count;    /* total count of locks held */
        /* Timing data structures */
        int                     precise_user_kernel_time;       /* precise user/kernel enabled for this thread */
        timer_data_t            user_timer;             /* user mode timer */
        uint64_t                user_timer_save;        /* saved user timer value */
        uint64_t                system_timer_save;      /* saved system timer value */
        uint64_t                vtimer_user_save;       /* saved values for vtimers */
        uint64_t                vtimer_prof_save;
        uint64_t                vtimer_rlim_save;
        uint64_t                vtimer_qos_save;

        timer_data_t            ptime;                  /* time executing in P mode */

        /* Timing for wait state */
        uint64_t                wait_sfi_begin_time;    /* start time for thread waiting in SFI */

        /* Timed wait expiration */
        timer_call_data_t       wait_timer;
        integer_t               wait_timer_active;
        boolean_t               wait_timer_is_set;

        /*
         * Processor/cache affinity
         *      - affinity_threads links task threads with the same affinity set
         */
        affinity_set_t          affinity_set;
        queue_chain_t           affinity_threads;
        /* Various bits of state to stash across a continuation, exclusive to the current thread block point */
                        mach_msg_return_t       state;          /* receive state */
                        mach_port_seqno_t       seqno;          /* seqno of recvd message */
                        ipc_object_t            object;         /* object received on */
                        mach_vm_address_t       msg_addr;       /* receive buffer pointer */
                        mach_msg_size_t         rsize;          /* max size for recvd msg */
                        mach_msg_size_t         msize;          /* actual size for recvd msg */
                        mach_msg_option_t       option;         /* options for receive */
                        mach_port_name_t        receiver_name;  /* the receive port name */
                        struct knote            *knote;         /* knote fired for rcv */
                                struct ipc_kmsg         *kmsg;  /* received message */
                                struct ipc_mqueue       *peekq; /* mqueue to peek at */
                                        mach_msg_priority_t qos;        /* received message qos */
                                        mach_msg_priority_t oqos;       /* override qos for message */
                        mach_msg_continue_t     continuation;

                        struct semaphore        *waitsemaphore;         /* semaphore ref */
                        struct semaphore        *signalsemaphore;       /* semaphore ref */
                        int                     options;                /* semaphore options */
                        kern_return_t           result;                 /* primary result */
                        mach_msg_continue_t     continuation;

                        int                     option;                 /* switch option */
                        boolean_t               reenable_workq_callback;        /* on entry, callbacks were suspended */
        /* Only user threads can cause guard exceptions, only kernel threads can be thread call threads */
                /* Group and call this thread is working on behalf of */
                        struct thread_call_group        *thc_group;
                        struct thread_call              *thc_call;      /* debug only, may be deallocated */

                /* Structure to save information about guard exception */
                        mach_exception_code_t           code;
                        mach_exception_subcode_t        subcode;
        /* Kernel holds on this thread */
        int16_t                                 suspend_count;
        /* User level suspensions */
        int16_t                                 user_stop_count;

        /* IPC data structures */
#if IMPORTANCE_INHERITANCE
        natural_t ith_assertions;               /* assertions pending drop */
#endif /* IMPORTANCE_INHERITANCE */
        struct ipc_kmsg_queue ith_messages;     /* messages to reap */
        mach_port_t ith_rpc_reply;              /* reply port for kernel RPCs */
        /* Ast/Halt data structures */
        vm_offset_t                             recover;        /* page fault recover (copyin/out) */

        queue_chain_t                           threads;        /* global list of all threads */

                queue_chain_t                   task_threads;

                /* Task membership */

                decl_lck_mtx_data(,mutex)
                /* Pending thread ast(s) */

                /* Miscellaneous bits guarded by mutex */
                        active:1,               /* Thread is active and has not been terminated */
                        started:1,              /* Thread has been started after creation */
                        static_param:1,         /* Disallow policy parameter changes */
                        inspection:1,           /* TRUE when task is being inspected by crash reporter */
                        policy_reset:1,         /* Disallow policy parameter changes on terminating threads */
                        suspend_parked:1,       /* thread parked in thread_suspended */
                        corpse_dup:1,           /* TRUE when thread is an inactive duplicate in a corpse */
                /* Ports associated with this thread */
                struct ipc_port                 *ith_self;      /* not a right, doesn't hold ref */
                struct ipc_port                 *ith_sself;     /* a send right */
                struct ipc_port                 *ith_special_reply_port;        /* ref to special reply port */
                struct exception_action         *exc_actions;
                uint32_t t_dtrace_flags;        /* DTrace thread states */
#define TH_DTRACE_EXECSUCCESS   0x01
                uint32_t t_dtrace_predcache;    /* DTrace per thread predicate value hint */
                int64_t t_dtrace_tracing;       /* Thread time under dtrace_probe() */
                int64_t t_dtrace_vtime;

                clock_sec_t t_page_creation_time;
                uint32_t    t_page_creation_count;
                uint32_t    t_page_creation_throttled;
#if (DEVELOPMENT || DEBUG)
                uint64_t    t_page_creation_throttled_hard;
                uint64_t    t_page_creation_throttled_soft;
#endif /* DEVELOPMENT || DEBUG */
/* The high 7 bits are the number of frames to sample of a user callstack. */
#define T_KPERF_CALLSTACK_DEPTH_OFFSET     (25)
#define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
#define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)

#define T_KPERF_AST_CALLSTACK (1U << 0) /* dump a callstack on thread's next AST */
#define T_KPERF_AST_DISPATCH  (1U << 1) /* dump a name on thread's next AST */
#define T_KPC_ALLOC           (1U << 2) /* thread needs a kpc_buf allocated */
/* only go up to T_KPERF_CALLSTACK_DEPTH_OFFSET - 1 */
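/*
 * Illustrative example (a sketch based only on the macros above, not code
 * from this file): a profiler wanting a 16-frame user callstack on the
 * thread's next AST might set
 *
 *      kperf_flags |= T_KPERF_AST_CALLSTACK | T_KPERF_SET_CALLSTACK_DEPTH(16);
 *
 * and later recover the depth with T_KPERF_GET_CALLSTACK_DEPTH(kperf_flags).
 */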
        uint32_t kperf_flags;
        uint32_t kperf_pet_gen;         /* last generation of PET that sampled this thread */
        uint32_t kperf_c_switch;        /* last dispatch detection */
        uint32_t kperf_pet_cnt;         /* how many times a thread has been sampled by PET */
        /* accumulated performance counters for this thread */

#if HYPERVISOR
        /* hypervisor virtual CPU object associated with this thread */
        void *hv_thread_target;
#endif /* HYPERVISOR */

        uint64_t thread_id;     /* system wide unique thread-id */

        /* Statistics accumulated per-thread and aggregated per-task */
        uint32_t                syscalls_unix;
        uint32_t                syscalls_mach;

        ledger_t                t_threadledger; /* per thread ledger */
        ledger_t                t_bankledger;   /* ledger to charge someone */
        uint64_t                t_deduct_bank_ledger_time;      /* cpu time to be deducted from bank ledger */
        uint64_t                t_deduct_bank_ledger_energy;    /* energy to be deducted from bank ledger */

#if MONOTONIC
        struct mt_thread t_monotonic;
#endif /* MONOTONIC */

        /*** Machine-dependent state ***/
        struct machine_thread   machine;
        /* policy is protected by the thread mutex */
        struct thread_requested_policy  requested_policy;
        struct thread_effective_policy  effective_policy;

        /* usynch override is protected by the task lock, eventually will be thread mutex */
        struct thread_qos_override {
                struct thread_qos_override      *override_next;
                uint32_t        override_contended_resource_count;
                int16_t         override_qos;
                int16_t         override_resource_type;
                user_addr_t     override_resource;
        _Atomic uint32_t kqwl_owning_count;
        uint32_t        ipc_overrides;
        uint32_t        sync_ipc_overrides;
        uint32_t        user_promotions;
        uint16_t        user_promotion_basepri;
        _Atomic uint16_t kevent_ast_bits;

        block_hint_t    pending_block_hint;
        block_hint_t    block_hint;     /* What type of primitive last caused us to block. */

        int             iotier_override;        /* atomic operations to set, cleared on ret to user */
        io_stat_info_t  thread_io_stats;        /* per-thread I/O statistics */
#if CONFIG_EMBEDDED
        task_watch_t    *taskwatch;     /* task watch */
#endif /* CONFIG_EMBEDDED */
        uint32_t                thread_callout_interrupt_wakeups;
        uint32_t                thread_callout_platform_idle_wakeups;
        uint32_t                thread_timer_wakeups_bin_1;
        uint32_t                thread_timer_wakeups_bin_2;

        uint16_t                callout_woken_from_icontext:1,
                                callout_woken_from_platform_idle:1,
                                callout_woke_thread:1,
                                thread_bitfield_unused:13;

        mach_port_name_t        ith_voucher_name;
        ipc_voucher_t           ith_voucher;
#endif /* CONFIG_IOSCHED */

        /* work interval (if any) associated with the thread. Uses thread mutex */
        struct work_interval            *th_work_interval;

#if     SCHED_TRACE_THREAD_WAKEUPS
        uintptr_t               thread_wakeup_bt[64];
#endif
};
#define ith_state           saved.receive.state
#define ith_object          saved.receive.object
#define ith_msg_addr        saved.receive.msg_addr
#define ith_rsize           saved.receive.rsize
#define ith_msize           saved.receive.msize
#define ith_option          saved.receive.option
#define ith_receiver_name   saved.receive.receiver_name
#define ith_continuation    saved.receive.continuation
#define ith_kmsg            saved.receive.kmsg
#define ith_peekq           saved.receive.peekq
#define ith_knote           saved.receive.knote
#define ith_qos             saved.receive.received_qos.qos
#define ith_qos_override    saved.receive.received_qos.oqos
#define ith_seqno           saved.receive.seqno

#define sth_waitsemaphore   saved.sema.waitsemaphore
#define sth_signalsemaphore saved.sema.signalsemaphore
#define sth_options         saved.sema.options
#define sth_result          saved.sema.result
#define sth_continuation    saved.sema.continuation

#define ITH_KNOTE_NULL      ((void *)NULL)
#define ITH_KNOTE_PSEUDO    ((void *)0xdeadbeef)
#define ITH_KNOTE_VALID(kn) ((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO)
#if MACH_ASSERT
#define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
                                            "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
                                            (thread)->thread_magic, (thread), THREAD_MAGIC)
#else
#define assert_thread_magic(thread) do { (void)(thread); } while (0)
#endif
extern void                     thread_bootstrap(void);

extern void                     thread_init(void);

extern void                     thread_daemon_init(void);

#define thread_reference_internal(thread)       \
                        (void)atomic_fetch_add_explicit(&(thread)->ref_count, 1, memory_order_relaxed)

#define thread_reference(thread)                                        \
MACRO_BEGIN                                                             \
        if ((thread) != THREAD_NULL)                                    \
                thread_reference_internal(thread);                      \
MACRO_END
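/*
 * Illustrative sketch (not code from this file): a holder that caches a
 * thread pointer takes a reference for as long as it keeps the pointer and
 * drops it when done, e.g.
 *
 *      thread_reference(thread);
 *      ...
 *      thread_deallocate(thread);
 */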
extern void                     thread_deallocate(
extern void                     thread_deallocate_safe(

extern void                     thread_inspect_deallocate(
                                                thread_inspect_t        thread);

extern void                     thread_terminate_self(void);

extern kern_return_t    thread_terminate_internal(

extern void                     thread_start(
                                                        thread_t                thread) __attribute__ ((noinline));

extern void                     thread_start_in_assert_wait(
                                                        wait_interrupt_t        interruptible) __attribute__ ((noinline));

extern void                     thread_terminate_enqueue(

extern void                     thread_exception_enqueue(
                                                exception_type_t etype);

extern void                     thread_copy_resource_info(
                                                thread_t src_thread);

extern void                     thread_terminate_crashed_threads(void);

extern void                     thread_stack_enqueue(

extern void                     thread_hold(
extern void                     thread_release(

extern void                     thread_corpse_continue(void);

extern boolean_t        thread_is_active(thread_t thread);
/* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
#if __SMP__
#define thread_lock_init(th)    simple_lock_init(&(th)->sched_lock, 0)
#define thread_lock(th)         simple_lock(&(th)->sched_lock)
#define thread_unlock(th)       simple_unlock(&(th)->sched_lock)

#define wake_lock_init(th)      simple_lock_init(&(th)->wake_lock, 0)
#define wake_lock(th)           simple_lock(&(th)->wake_lock)
#define wake_unlock(th)         simple_unlock(&(th)->wake_lock)
#else
#define thread_lock_init(th)    do { (void)th; } while(0)
#define thread_lock(th)         do { (void)th; } while(0)
#define thread_unlock(th)       do { (void)th; } while(0)

#define wake_lock_init(th)      do { (void)th; } while(0)
#define wake_lock(th)           do { (void)th; } while(0)
#define wake_unlock(th)         do { (void)th; } while(0)
#endif
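/*
 * Illustrative sketch of the locking pattern described above (not code taken
 * from this file): callers disable interrupts before taking the thread lock,
 * e.g.
 *
 *      spl_t s = splsched();
 *      thread_lock(thread);
 *      ... read or update scheduler state ...
 *      thread_unlock(thread);
 *      splx(s);
 */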
#define thread_should_halt_fast(thread)         (!(thread)->active)

extern void                     stack_alloc(
extern void                     stack_handoff(
extern void                     stack_free(
extern void                     stack_free_reserved(
extern boolean_t                stack_alloc_try(

extern void                     stack_collect(void);

extern void                     stack_init(void);
extern kern_return_t    thread_info_internal(
                                                        thread_flavor_t                 flavor,
                                                        thread_info_t                   thread_info_out,
                                                        mach_msg_type_number_t          *thread_info_count);

extern kern_return_t    kernel_thread_create(
                                                        thread_continue_t       continuation,
                                                        thread_t                *new_thread);

extern kern_return_t    kernel_thread_start_priority(
                                                        thread_continue_t       continuation,
                                                        thread_t                *new_thread);
extern void                     machine_stack_attach(
extern vm_offset_t              machine_stack_detach(
extern void                     machine_stack_handoff(

extern thread_t                 machine_switch_context(
                                                        thread_continue_t       continuation,
                                                        thread_t                new_thread);

extern void                     machine_load_context(
                                                        thread_t                thread) __attribute__((noreturn));
extern kern_return_t    machine_thread_state_initialize(

extern kern_return_t    machine_thread_set_state(
                                                        thread_flavor_t                 flavor,
                                                        thread_state_t                  state,
                                                        mach_msg_type_number_t          count);

extern kern_return_t    machine_thread_get_state(
                                                        thread_flavor_t                 flavor,
                                                        thread_state_t                  state,
                                                        mach_msg_type_number_t          *count);

extern kern_return_t    machine_thread_dup(

extern void                     machine_thread_init(void);
extern kern_return_t    machine_thread_create(

extern void             machine_thread_switch_addrmode(

extern void             machine_thread_destroy(

extern void             machine_set_current_thread(

extern kern_return_t    machine_thread_get_kern_state(
                                                        thread_flavor_t                 flavor,
                                                        thread_state_t                  tstate,
                                                        mach_msg_type_number_t          *count);

extern kern_return_t    machine_thread_inherit_taskwide(

extern kern_return_t    machine_thread_set_tsd_base(
                                                        mach_vm_offset_t                tsd_base);

#define thread_mtx_lock(thread)                 lck_mtx_lock(&(thread)->mutex)
#define thread_mtx_try(thread)                  lck_mtx_try_lock(&(thread)->mutex)
#define thread_mtx_unlock(thread)               lck_mtx_unlock(&(thread)->mutex)
extern void thread_apc_ast(thread_t thread);

extern void thread_update_qos_cpu_time(thread_t thread);

void act_machine_sv_free(thread_t, int);

vm_offset_t                     min_valid_stack_address(void);
vm_offset_t                     max_valid_stack_address(void);

static inline uint16_t  thread_set_tag_internal(thread_t thread, uint16_t tag) {
        return __sync_fetch_and_or(&thread->thread_tag, tag);
}

static inline uint16_t  thread_get_tag_internal(thread_t thread) {
        return thread->thread_tag;
}

extern void thread_set_options(uint32_t thopt);
#else   /* MACH_KERNEL_PRIVATE */

extern thread_t         current_thread(void);

extern void                     thread_reference(
extern void                     thread_deallocate(

#endif  /* MACH_KERNEL_PRIVATE */

#ifdef  KERNEL_PRIVATE

extern void                     thread_starts_owning_workloop(
extern void                     thread_ends_owning_workloop(
extern uint32_t                 thread_owned_workloops_count(

extern uint64_t                 thread_dispatchqaddr(
extern uint64_t                 thread_rettokern_addr(

#endif  /* KERNEL_PRIVATE */

extern uint64_t                 thread_tid(thread_t thread);
#ifdef  XNU_KERNEL_PRIVATE

/*
 * Thread tags; for easy identification.
 */
#define THREAD_TAG_MAINTHREAD 0x1
#define THREAD_TAG_CALLOUT 0x2
#define THREAD_TAG_IOWORKLOOP 0x4

#define THREAD_TAG_PTHREAD 0x10
#define THREAD_TAG_WORKQUEUE 0x20

uint16_t        thread_set_tag(thread_t, uint16_t);
uint16_t        thread_get_tag(thread_t);
extern kern_return_t    thread_state_initialize(

extern kern_return_t    thread_setstatus(
                                                        thread_state_t                  tstate,
                                                        mach_msg_type_number_t          count);

extern kern_return_t    thread_getstatus(
                                                        thread_state_t                  tstate,
                                                        mach_msg_type_number_t          *count);

extern kern_return_t    thread_create_with_continuation(
                                                        thread_t                *new_thread,
                                                        thread_continue_t       continuation);

extern kern_return_t    thread_create_waiting(task_t            task,
                                           thread_continue_t    continuation,
                                           thread_t             *new_thread);

extern kern_return_t    thread_create_workq(
                                                        thread_continue_t       thread_return,
                                                        thread_t                *new_thread);

extern kern_return_t    thread_create_workq_waiting(
                                                        thread_continue_t       thread_return,
                                                        thread_t                *new_thread);

extern  void    thread_yield_internal(
        mach_msg_timeout_t      interval);
extern void     thread_yield_to_preemption(void);

/*
 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
 *
 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
 * 3) Disable. Remove any existing CPU limit.
 */
#define THREAD_CPULIMIT_BLOCK           0x1
#define THREAD_CPULIMIT_EXCEPTION       0x2
#define THREAD_CPULIMIT_DISABLE         0x3

struct _thread_ledger_indices {

extern struct _thread_ledger_indices thread_ledgers;

extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
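/*
 * Illustrative sketch (not code from this file): limiting the current thread
 * to roughly 50% of a CPU evaluated over a one-second interval, and later
 * removing that limit, might look like
 *
 *      thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, NSEC_PER_SEC);
 *      ...
 *      thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
 *
 * The precise interpretation of percentage and interval_ns is defined by the
 * implementation, not by this header.
 */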
extern void                     thread_read_times(
                                                time_value_t    *user_time,
                                                time_value_t    *system_time);

extern uint64_t         thread_get_runtime_self(void);

extern void                     thread_setuserstack(
                                                mach_vm_offset_t        user_stack);

extern uint64_t         thread_adjuserstack(

extern void                     thread_setentrypoint(
                                                mach_vm_offset_t        entry);

extern kern_return_t    thread_set_tsd_base(
                                                        mach_vm_offset_t tsd_base);

extern kern_return_t    thread_setsinglestep(

extern kern_return_t    thread_userstack(

extern kern_return_t    thread_entrypoint(
                                                mach_vm_offset_t *);

extern kern_return_t    thread_userstackdefault(

extern kern_return_t    thread_wire_internal(
                                                        host_priv_t             host_priv,
                                                        boolean_t               *prev_state);

extern kern_return_t    thread_dup(thread_t);

extern kern_return_t    thread_dup2(thread_t, thread_t);
#if !defined(_SCHED_CALL_T_DEFINED)
#define _SCHED_CALL_T_DEFINED
typedef void    (*sched_call_t)(

#define SCHED_CALL_BLOCK                0x1
#define SCHED_CALL_UNBLOCK              0x2

extern void             thread_sched_call(

extern sched_call_t     thread_disable_sched_call(

extern void     thread_reenable_sched_call(

extern void             thread_static_param(

extern boolean_t        thread_is_static_param(

extern task_t   get_threadtask(thread_t);
#define thread_is_64bit(thd)    \
        task_has_64BitAddr(get_threadtask(thd))

extern void             *get_bsdthread_info(thread_t);
extern void             set_bsdthread_info(thread_t, void *);
extern void             *uthread_alloc(task_t, thread_t, int);
extern void             uthread_cleanup_name(void *uthread);
extern void             uthread_cleanup(task_t, void *, void *);
extern void             uthread_zone_free(void *);
extern void             uthread_cred_free(void *);

extern void             uthread_reset_proc_refcount(void *);
extern int              uthread_get_proc_refcount(void *);
extern int              proc_ref_tracking_disabled;
extern boolean_t        thread_should_halt(
extern boolean_t        thread_should_abort(

extern int is_64signalregset(void);

extern void act_set_kperf(thread_t);
extern void set_astledger(thread_t thread);
extern void act_set_io_telemetry_ast(thread_t);

extern uint32_t dtrace_get_thread_predcache(thread_t);
extern int64_t dtrace_get_thread_vtime(thread_t);
extern int64_t dtrace_get_thread_tracing(thread_t);
extern boolean_t dtrace_get_thread_reentering(thread_t);
extern int dtrace_get_thread_last_cpu_id(thread_t);
extern vm_offset_t dtrace_get_kernel_stack(thread_t);
extern void dtrace_set_thread_predcache(thread_t, uint32_t);
extern void dtrace_set_thread_vtime(thread_t, int64_t);
extern void dtrace_set_thread_tracing(thread_t, int64_t);
extern void dtrace_set_thread_reentering(thread_t, boolean_t);
extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t);
extern void dtrace_thread_bootstrap(void);
extern void dtrace_thread_didexec(thread_t);

extern int64_t dtrace_calc_thread_recent_vtime(thread_t);
extern kern_return_t    thread_set_wq_state32(
                                              thread_state_t    tstate);

extern kern_return_t    thread_set_wq_state64(
                                              thread_state_t    tstate);

extern vm_offset_t      kernel_stack_mask;
extern vm_offset_t      kernel_stack_size;
extern vm_offset_t      kernel_stack_depth_max;
extern void guard_ast(thread_t);
extern void fd_guard_ast(thread_t,
        mach_exception_code_t, mach_exception_subcode_t);

extern void vn_guard_ast(thread_t,
        mach_exception_code_t, mach_exception_subcode_t);

extern void mach_port_guard_ast(thread_t,
        mach_exception_code_t, mach_exception_subcode_t);
extern void thread_guard_violation(thread_t,
        mach_exception_code_t, mach_exception_subcode_t);
extern void thread_update_io_stats(thread_t, int size, int io_flags);

extern kern_return_t    thread_set_voucher_name(mach_port_name_t name);
extern kern_return_t    thread_get_current_voucher_origin_pid(int32_t *pid);

extern void set_thread_rwlock_boost(void);
extern void clear_thread_rwlock_boost(void);
/*! @function thread_has_thread_name
    @abstract Checks if a thread has a name.
    @discussion This function takes one input, a thread, and returns a boolean value indicating whether that thread already has a name associated with it.
    @param th The thread to inspect.
    @result TRUE if the thread has a name, FALSE otherwise.
*/
extern boolean_t thread_has_thread_name(thread_t th);
/*! @function thread_set_thread_name
    @abstract Set a thread's name.
    @discussion This function takes two input parameters: a thread to name, and the name to apply to the thread.  The name will be attached to the thread in order to better identify the thread.
    @param th The thread to be named.
    @param name The name to apply to the thread.
*/
extern void thread_set_thread_name(thread_t th, const char* name);
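/*
 * Illustrative use (a sketch; the thread handle and the name string are
 * hypothetical):
 *
 *      if (!thread_has_thread_name(th))
 *              thread_set_thread_name(th, "com.example.worker");
 */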
extern void thread_enable_send_importance(thread_t thread, boolean_t enable);

/* Get a backtrace for a thread's kernel or user stack (user_p), with pc and optionally
 * frame pointer (getfp). Returns bytes added to buffer, and kThreadTruncatedBT in
 * thread_trace_flags if a user page is not present after kdp_lightweight_fault() is
 * called. */
extern int                              machine_trace_thread(
                                                        uint32_t *thread_trace_flags);

extern int                              machine_trace_thread64(thread_t thread,
                                                        uint32_t *thread_trace_flags);

#endif  /* XNU_KERNEL_PRIVATE */
/*! @function kernel_thread_start
    @abstract Create a kernel thread.
    @discussion This function takes three input parameters, namely reference to the function that the thread should execute, caller specified data and a reference which is used to return the newly created kernel thread. The function returns KERN_SUCCESS on success or an appropriate kernel code type indicating the error. Note that the caller is responsible for explicitly releasing the reference to the created thread when no longer needed. This should be done by calling thread_deallocate(new_thread).
    @param continuation A C-function pointer where the thread will begin execution.
    @param parameter Caller specified data to be passed to the new thread.
    @param new_thread Reference to the new thread is returned in this parameter.
    @result Returns KERN_SUCCESS on success or an appropriate kernel code type.
*/
extern kern_return_t    kernel_thread_start(
                                                        thread_continue_t       continuation,
                                                        void                    *parameter,
                                                        thread_t                *new_thread);
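/*
 * Illustrative use (a sketch; my_worker_func and my_context are hypothetical,
 * not part of this interface):
 *
 *      thread_t new_thread;
 *      kern_return_t kr;
 *
 *      kr = kernel_thread_start(my_worker_func, my_context, &new_thread);
 *      if (kr == KERN_SUCCESS)
 *              thread_deallocate(new_thread);
 *
 * As the discussion above notes, the caller must release the returned
 * reference with thread_deallocate() once the handle is no longer needed.
 */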
#ifdef KERNEL_PRIVATE
void thread_set_eager_preempt(thread_t thread);
void thread_clear_eager_preempt(thread_t thread);
extern ipc_port_t convert_thread_to_port(thread_t);
extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
extern boolean_t is_vm_privileged(void);
extern boolean_t set_vm_privilege(boolean_t);
extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
#endif /* KERNEL_PRIVATE */

#endif  /* _KERN_THREAD_H_ */