+ if (devbsdunit < LOWPRI_MAX_NUM_DEV) {
+ microuptime(&_throttle_io_info[devbsdunit].last_IO_timestamp);
+ }
+}
+
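+/*
+ * Return non-zero if a low-priority I/O issued now against devbsdunit would
+ * be throttled, i.e. the device saw normal-priority I/O within the last
+ * lowpri_window_msecs.  Passing -1 selects the system-wide maximum window,
+ * lowpri_max_waiting_msecs.
+ */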
+int throttle_io_will_be_throttled(int lowpri_window_msecs, size_t devbsdunit)
+{
+ struct timeval elapsed;
+ int elapsed_msecs;
+
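+ /* how long since the last normal-priority I/O on this device? */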
+ microuptime(&elapsed);
+ timevalsub(&elapsed, &_throttle_io_info[devbsdunit].last_normal_IO_timestamp);
+ elapsed_msecs = elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000;
+
+ if (lowpri_window_msecs == -1) // use the max waiting time
+ lowpri_window_msecs = lowpri_max_waiting_msecs;
+
+ return elapsed_msecs < lowpri_window_msecs;
+}
+
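+/*
+ * Throttle the current thread's low-priority I/O.  If the thread has an
+ * active throttle window (uu_lowpri_window) and ok_to_sleep is TRUE, back
+ * off in LOWPRI_SLEEP_INTERVAL-msec slices while the device keeps seeing
+ * normal-priority I/O, then clear the window and drop this thread from the
+ * device's throttle count.
+ */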
+void throttle_lowpri_io(boolean_t ok_to_sleep)
+{
+ int i = 0; /* slices slept; read after the loop even when we never sleep */
+ int max_try_num;
+ struct uthread *ut;
+
+ ut = get_bsdthread_info(current_thread());
+
+ if (ut->uu_lowpri_window == 0)
+ return;
+
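+ /*
+ * Bound the number of sleep slices: a base budget of lowpri_max_waiting_msecs
+ * worth of LOWPRI_SLEEP_INTERVAL sleeps, scaled by the number of threads
+ * currently throttling on this device (presumably so the total budget grows
+ * with contention).
+ */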
+ max_try_num = lowpri_max_waiting_msecs / LOWPRI_SLEEP_INTERVAL * MAX(1, _throttle_io_info[ut->uu_devbsdunit].numthreads_throttling);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START,
+ ut->uu_lowpri_window, 0, 0, 0, 0);
+
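+ /* sleep one slice at a time, re-checking for recent normal-priority I/O */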
+ if (ok_to_sleep == TRUE) {
+ for (i=0; i<max_try_num; i++) {
+ if (throttle_io_will_be_throttled(ut->uu_lowpri_window, ut->uu_devbsdunit)) {
+ IOSleep(LOWPRI_SLEEP_INTERVAL);
+ } else {
+ break;
+ }
+ }
+ }
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END,
+ ut->uu_lowpri_window, i * LOWPRI_SLEEP_INTERVAL, 0, 0, 0);
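+ /* done throttling: remove this thread from the device's throttle count */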
+ SInt32 oldValue = OSDecrementAtomic(&_throttle_io_info[ut->uu_devbsdunit].numthreads_throttling);
+ ut->uu_lowpri_window = 0;
+
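+ /*
+ * oldValue is the count before the decrement; this thread should have been
+ * counted, so anything below 1 means an unbalanced increment/decrement.
+ */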
+ if (oldValue <= 0) {
+ panic("%s: numthreads negative", __func__);
+ }
+}