#include <machine/machine_routines.h>
#include <machine/exec.h>
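+/* nfs_conf.h supplies the CONFIG_NFS_CLIENT build option used below. */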
+#include <nfs/nfs_conf.h>
+
#include <vm/vm_protos.h>
#include <vm/vm_pageout.h>
#include <vm/vm_compressor_algorithms.h>
extern int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
-#if NFSCLIENT
+#if CONFIG_NFS_CLIENT
extern int
netboot_root(void);
#endif
STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
-#if NFSCLIENT
+#if CONFIG_NFS_CLIENT
STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
#endif
#ifdef CONFIG_IMGSRC_ACCESS
exp->p_pid = p->p_pid;
exp->p_oppid = p->p_oppid;
/* Mach related */
- exp->user_stack = p->user_stack;
exp->p_debugger = p->p_debugger;
exp->sigwait = p->sigwait;
/* scheduling */
exp->p_pid = p->p_pid;
exp->p_oppid = p->p_oppid;
/* Mach related */
- exp->user_stack = p->user_stack;
exp->p_debugger = p->p_debugger;
exp->sigwait = p->sigwait;
/* scheduling */
if (vm_map_copy_overwrite(kernel_map,
(vm_map_address_t)copy_start,
- tmp, FALSE) != KERN_SUCCESS) {
+ tmp, (vm_map_size_t) arg_size, FALSE) != KERN_SUCCESS) {
kmem_free(kernel_map, copy_start,
round_page(arg_size));
vm_map_copy_discard(tmp);
(void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE,
sysctl_perfcontrol_callout_stat, "I", "");
+#if __AMP__
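+/*
+ * AMP (asymmetric multiprocessing) scheduler tunables: read-write knobs
+ * controlling the spill/steal and IPI policy between CPU clusters, exposed
+ * here for experimentation and debugging.
+ */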
+extern int sched_amp_idle_steal;
+SYSCTL_INT(_kern, OID_AUTO, sched_amp_idle_steal,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &sched_amp_idle_steal, 0, "");
+extern int sched_amp_spill_steal;
+SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_steal,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &sched_amp_spill_steal, 0, "");
+extern int sched_amp_spill_count;
+SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_count,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &sched_amp_spill_count, 0, "");
+extern int sched_amp_spill_deferred_ipi;
+SYSCTL_INT(_kern, OID_AUTO, sched_amp_spill_deferred_ipi,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &sched_amp_spill_deferred_ipi, 0, "");
+extern int sched_amp_pcores_preempt_immediate_ipi;
+SYSCTL_INT(_kern, OID_AUTO, sched_amp_pcores_preempt_immediate_ipi,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &sched_amp_pcores_preempt_immediate_ipi, 0, "");
+#endif /* __AMP__ */
#endif /* __arm__ || __arm64__ */
#if __arm64__
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
0, 0, sysctl_symfile, "A", "");
-#if NFSCLIENT
+#if CONFIG_NFS_CLIENT
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_available, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_available, 0, "");
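+/* Tunable threshold consulted by the VM compressor when deciding whether to run a major compaction. */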
+extern int min_csegs_per_major_compaction;
+SYSCTL_INT(_vm, OID_AUTO, compressor_min_csegs_per_major_compaction, CTLFLAG_RW | CTLFLAG_LOCKED, &min_csegs_per_major_compaction, 0, "");
+
SYSCTL_INT(_vm, OID_AUTO, vm_ripe_target_age_in_secs, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_ripe_target_age, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
#if DEVELOPMENT || DEBUG
+extern void do_cseg_wedge_thread(void);
+extern void do_cseg_unwedge_thread(void);
+
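+/*
+ * Development-only sysctls that park ("wedge") and release ("unwedge") the
+ * compressor's c_seg thread. Writing any non-zero value triggers the action;
+ * reads take no action. Assumed usage from userspace:
+ *   sysctl kern.cseg_wedge_thread=1
+ *   sysctl kern.cseg_unwedge_thread=1
+ */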
+static int
+cseg_wedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+ int error, val = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || val == 0) {
+ return error;
+ }
+
+ do_cseg_wedge_thread();
+ return 0;
+}
+SYSCTL_PROC(_kern, OID_AUTO, cseg_wedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_wedge_thread, "I", "wedge c_seg thread");
+
+static int
+cseg_unwedge_thread SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+ int error, val = 0;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || val == 0) {
+ return error;
+ }
+
+ do_cseg_unwedge_thread();
+ return 0;
+}
+SYSCTL_PROC(_kern, OID_AUTO, cseg_unwedge_thread, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, cseg_unwedge_thread, "I", "unstuck c_seg thread");
+
static atomic_int wedge_thread_should_wake = 0;
static int
extern uint64_t MutexSpin;
-SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_us, CTLFLAG_RW, &MutexSpin,
- "Spin time for acquiring a kernel mutex");
+SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_abs, CTLFLAG_RW, &MutexSpin,
+ "Spin time in abs for acquiring a kernel mutex");
+
+extern uint64_t low_MutexSpin;
+extern int64_t high_MutexSpin;
+extern unsigned int real_ncpus;
+
+SYSCTL_QUAD(_kern, OID_AUTO, low_mutex_spin_abs, CTLFLAG_RW, &low_MutexSpin,
+ "Low spin threshold in abs for acquiring a kernel mutex");
+
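+/*
+ * kern.high_mutex_spin_abs: upper bound, in absolute-time units, on kernel
+ * mutex spinning. Writing -1 selects the adaptive default of
+ * low_MutexSpin * real_ncpus; reads always report the effective threshold.
+ */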
+static int
+sysctl_high_mutex_spin_ns SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ int64_t val = 0;
+ int64_t res;
+
+ /* Check if the user is writing to high_MutexSpin, or just reading it */
+ if (req->newptr) {
+ error = SYSCTL_IN(req, &val, sizeof(val));
+ if (error || (val < 0 && val != -1)) {
+ return error;
+ }
+ high_MutexSpin = val;
+ }
+
+ if (high_MutexSpin >= 0) {
+ res = high_MutexSpin;
+ } else {
+ res = low_MutexSpin * real_ncpus;
+ }
+ return SYSCTL_OUT(req, &res, sizeof(res));
+}
+SYSCTL_PROC(_kern, OID_AUTO, high_mutex_spin_abs, CTLFLAG_RW | CTLTYPE_QUAD, 0, 0, sysctl_high_mutex_spin_ns, "Q",
+ "High spin threshold in abs for acquiring a kernel mutex");
#if defined (__x86_64__)
int error;
mach_port_name_t task_port_name;
task_t task;
- int buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0;
+ size_t buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0;
vmobject_list_output_t buffer;
size_t output_size;
if (buffer_size) {
- const int min_size = sizeof(vm_object_query_data_t) + sizeof(int64_t);
+ const size_t min_size = sizeof(vm_object_query_data_t) + sizeof(int64_t);
- if (buffer_size < min_size) {
- buffer_size = min_size;
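+ /* Reject requests smaller than a single entry or larger than INT_MAX instead of silently resizing them. */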
+ if (buffer_size < min_size || buffer_size > INT_MAX) {
+ return EINVAL;
}
buffer = kalloc(buffer_size);