+/*
+ * Return an entry's automatic refill period, in nanoseconds.
+ */
+kern_return_t
+ledger_get_period(ledger_t ledger, int entry, uint64_t *period)
+{
+ struct ledger_entry *le;
+
+ if (!ENTRY_VALID(ledger, entry))
+ return (KERN_INVALID_VALUE);
+
+ le = &ledger->l_entries[entry];
+ *period = abstime_to_nsecs(le->_le.le_refill.le_refill_period);
+ lprintf(("ledger_get_period: %llx\n", *period));
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Adjust the automatic refill period.
+ */
+kern_return_t
+ledger_set_period(ledger_t ledger, int entry, uint64_t period)
+{
+ struct ledger_entry *le;
+
+ lprintf(("ledger_set_period: %llx\n", period));
+ if (!ENTRY_VALID(ledger, entry))
+ return (KERN_INVALID_VALUE);
+
+ le = &ledger->l_entries[entry];
+
+ /*
+ * A refill period refills the ledger in multiples of the limit,
+ * so it only makes sense once a limit has been set.
+ */
+ assert(le->le_limit != LEDGER_LIMIT_INFINITY);
+
+ if (le->le_flags & LF_TRACKING_MAX) {
+ /*
+ * Refill is incompatible with rolling max tracking.
+ */
+ return (KERN_INVALID_VALUE);
+ }
+
+ le->_le.le_refill.le_refill_period = nsecs_to_abstime(period);
+
+ /*
+ * Set the 'starting time' for the next refill to now. Since
+ * we're resetting the balance to zero here, we consider this
+ * moment the starting time for accumulating a balance that
+ * counts towards the limit.
+ */
+ le->_le.le_refill.le_last_refill = mach_absolute_time();
+ ledger_zero_balance(ledger, entry);
+
+ flag_set(&le->le_flags, LF_REFILL_SCHEDULED);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Disable automatic refill.
+ */
+kern_return_t
+ledger_disable_refill(ledger_t ledger, int entry)
+{
+ struct ledger_entry *le;
+
+ if (!ENTRY_VALID(ledger, entry))
+ return (KERN_INVALID_VALUE);
+
+ le = &ledger->l_entries[entry];
+
+ flag_clear(&le->le_flags, LF_REFILL_SCHEDULED);
+
+ return (KERN_SUCCESS);
+}
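+
+/*
+ * Sketch of a typical refill setup (hypothetical caller; entry creation
+ * and the exact ledger_set_limit() signature are outside this diff and
+ * are assumed here):
+ *
+ *     ledger_set_limit(ledger, cpu_entry, limit);
+ *     ledger_set_period(ledger, cpu_entry, NSEC_PER_SEC);
+ *     ledger_set_action(ledger, cpu_entry, LEDGER_ACTION_BLOCK);
+ *
+ * ledger_disable_refill() clears only LF_REFILL_SCHEDULED; the entry's
+ * limit and action remain in effect.
+ */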
+
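+/*
+ * Return the action flags (LEDGER_ACTION_*) currently set on an entry.
+ */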
+kern_return_t
+ledger_get_actions(ledger_t ledger, int entry, int *actions)
+{
+ if (!ENTRY_VALID(ledger, entry))
+ return (KERN_INVALID_VALUE);
+
+ *actions = ledger->l_entries[entry].le_flags & LEDGER_ACTION_MASK;
+ lprintf(("ledger_get_actions: %#x\n", *actions));
+ return (KERN_SUCCESS);
+}
+
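+/*
+ * Set the action to take when an entry's limit is exceeded, e.g.
+ * LEDGER_ACTION_BLOCK or LEDGER_ACTION_CALLBACK.
+ */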
+kern_return_t
+ledger_set_action(ledger_t ledger, int entry, int action)
+{
+ lprintf(("ledger_set_action: %#x\n", action));
+ if (!ENTRY_VALID(ledger, entry))
+ return (KERN_INVALID_VALUE);
+
+ flag_set(&ledger->l_entries[entry].le_flags, action);
+ return (KERN_SUCCESS);
+}
+
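+/*
+ * Mark a thread for ledger processing at its next AST.  If the thread
+ * is currently running on another processor, poke that processor so
+ * the AST is noticed promptly.
+ */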
+void
+set_astledger(thread_t thread)
+{
+ spl_t s = splsched();
+
+ if (thread == current_thread()) {
+ thread_ast_set(thread, AST_LEDGER);
+ ast_propagate(thread->ast);
+ } else {
+ processor_t p;
+
+ thread_lock(thread);
+ thread_ast_set(thread, AST_LEDGER);
+ p = thread->last_processor;
+ if ((p != PROCESSOR_NULL) && (p->state == PROCESSOR_RUNNING) &&
+ (p->active_thread == thread))
+ cause_ast_check(p);
+ thread_unlock(thread);
+ }
+
+ splx(s);
+}
+
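+/*
+ * Debit 'amount' from an entry, lowering its balance, then check the
+ * new balance against the entry's limit.
+ */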
+kern_return_t
+ledger_debit(ledger_t ledger, int entry, ledger_amount_t amount)
+{
+ struct ledger_entry *le;
+ ledger_amount_t old, new;
+
+ if (!ENTRY_VALID(ledger, entry) || (amount < 0))
+ return (KERN_INVALID_ARGUMENT);
+
+ if (amount == 0)
+ return (KERN_SUCCESS);
+
+ le = &ledger->l_entries[entry];
+
+ old = OSAddAtomic64(amount, &le->le_debit);
+ new = old + amount;
+
+ lprintf(("%p Debit %lld->%lld\n", thread, old, new));
+ ledger_check_new_balance(ledger, entry);
+ return (KERN_SUCCESS);
+}
+
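+/*
+ * Handler for the ledger AST (see set_astledger()).  First brings this
+ * thread's CPU limit in line with any task-wide per-thread limit, then
+ * scans the task and thread ledgers for entries in deficit, refilling,
+ * issuing callbacks, and blocking as each entry's flags dictate.
+ */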
+void
+ledger_ast(thread_t thread)
+{
+ struct ledger *l = thread->t_ledger;
+ struct ledger *thl;
+ uint32_t block;
+ uint64_t now;
+ uint8_t task_flags;
+ uint8_t task_percentage;
+ uint64_t task_interval;
+
+ kern_return_t ret;
+ task_t task = thread->task;
+
+ lprintf(("Ledger AST for %p\n", thread));
+
+ ASSERT(task != NULL);
+ ASSERT(thread == current_thread());
+
+top:
+ /*
+ * Take a self-consistent snapshot of the CPU usage monitor parameters. The task
+ * can change them at any point (with the task locked).
+ */
+ task_lock(task);
+ task_flags = task->rusage_cpu_flags;
+ task_percentage = task->rusage_cpu_perthr_percentage;
+ task_interval = task->rusage_cpu_perthr_interval;
+ task_unlock(task);
+
+ /*
+ * Make sure this thread is up to date with regard to any task-wide per-thread
+ * CPU limit, but only if it doesn't have a thread-private blocking CPU limit.
+ */
+ if (((task_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) &&
+ ((thread->options & TH_OPT_PRVT_CPULIMIT) == 0)) {
+ uint8_t percentage;
+ uint64_t interval;
+ int action;
+
+ thread_get_cpulimit(&action, &percentage, &interval);
+
+ /*
+ * If the thread's CPU limits no longer match the task's, or the
+ * task has a limit but the thread doesn't, update the limit.
+ */
+ if (((thread->options & TH_OPT_PROC_CPULIMIT) == 0) ||
+ (interval != task_interval) || (percentage != task_percentage)) {
+ thread_set_cpulimit(THREAD_CPULIMIT_EXCEPTION, task_percentage, task_interval);
+ assert((thread->options & TH_OPT_PROC_CPULIMIT) != 0);
+ }
+ } else if (((task_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) &&
+ (thread->options & TH_OPT_PROC_CPULIMIT)) {
+ assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0);
+
+ /*
+ * Task no longer has a per-thread CPU limit; remove this thread's
+ * corresponding CPU limit.
+ */
+ thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
+ assert((thread->options & TH_OPT_PROC_CPULIMIT) == 0);
+ }
+
+ /*
+ * If the task or thread is being terminated, let's just get on with it.
+ */
+ if ((l == NULL) || !task->active || task->halting || !thread->active)
+ return;
+
+ /*
+ * Examine all entries in deficit to see which might be eligible for
+ * an automatic refill, which require callbacks to be issued, and
+ * which require blocking.
+ */
+ block = 0;
+ now = mach_absolute_time();
+
+ /*
+ * Note that thread->t_threadledger may have been changed by the
+ * thread_set_cpulimit() call above, so don't examine it until afterwards.
+ */
+ thl = thread->t_threadledger;
+ if (LEDGER_VALID(thl)) {
+ block |= ledger_check_needblock(thl, now);
+ }
+ block |= ledger_check_needblock(l, now);
+
+ /*
+ * If we are supposed to block on the availability of one or more
+ * resources, find the first entry in deficit for which we should wait.
+ * Schedule a refill if necessary and then sleep until the resource
+ * becomes available.
+ */
+ if (block) {
+ if (LEDGER_VALID(thl)) {
+ ret = ledger_perform_blocking(thl);
+ if (ret != KERN_SUCCESS)
+ goto top;
+ }
+ ret = ledger_perform_blocking(l);
+ if (ret != KERN_SUCCESS)
+ goto top;
+ } /* block */
+}
+
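+/*
+ * Walk a ledger's entries, refilling entries that are past due and
+ * firing warning or limit callbacks where an entry's flags call for
+ * them.  Returns nonzero if any entry over its limit requires the
+ * caller to block.
+ */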
+static uint32_t
+ledger_check_needblock(ledger_t l, uint64_t now)
+{
+ int i;
+ uint32_t flags, block = 0;
+ struct ledger_entry *le;
+ struct ledger_callback *lc;
+
+ for (i = 0; i < l->l_size; i++) {
+ le = &l->l_entries[i];
+
+ lc = entry_get_callback(l, i);
+
+ if (limit_exceeded(le) == FALSE) {
+ if (le->le_flags & LEDGER_ACTION_CALLBACK) {
+ /*
+ * If needed, invoke the callback as a warning.
+ * This needs to happen both when the balance rises above
+ * the warning level and when it dips back below it.
+ */
+ assert(lc != NULL);
+ /*
+ * See comments for matching logic in ledger_check_new_balance().
+ */
+ if (warn_level_exceeded(le)) {
+ flags = flag_set(&le->le_flags, LF_WARNED);
+ if ((flags & LF_WARNED) == 0) {
+ lc->lc_func(LEDGER_WARNING_ROSE_ABOVE, lc->lc_param0, lc->lc_param1);
+ }
+ } else {
+ flags = flag_clear(&le->le_flags, LF_WARNED);
+ if (flags & LF_WARNED) {
+ lc->lc_func(LEDGER_WARNING_DIPPED_BELOW, lc->lc_param0, lc->lc_param1);
+ }
+ }
+ }
+
+ continue;
+ }
+
+ /* We're over the limit, so refill if we are eligible and past due. */
+ if (le->le_flags & LF_REFILL_SCHEDULED) {
+ if ((le->_le.le_refill.le_last_refill + le->_le.le_refill.le_refill_period) <= now) {
+ ledger_refill(now, l, i);
+ if (limit_exceeded(le) == FALSE)
+ continue;
+ }
+ }
+
+ if (le->le_flags & LEDGER_ACTION_BLOCK)
+ block = 1;
+ if ((le->le_flags & LEDGER_ACTION_CALLBACK) == 0)
+ continue;
+
+ /*
+ * If the LEDGER_ACTION_CALLBACK flag is on, we expect there to
+ * be a registered callback.
+ */
+ assert(lc != NULL);
+ flags = flag_set(&le->le_flags, LF_CALLED_BACK);
+ /* Callback has already been called */
+ if (flags & LF_CALLED_BACK)
+ continue;
+ lc->lc_func(FALSE, lc->lc_param0, lc->lc_param1);
+ }
+ return (block);
+}
+
+/* return KERN_SUCCESS to continue, KERN_FAILURE to restart */
+static kern_return_t
+ledger_perform_blocking(ledger_t l)