+
+static void
+hfs_syncer(void *arg0, void *unused)
+{
+#pragma unused(unused)
+
+ struct hfsmount *hfsmp = arg0;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+ uint32_t delay = HFS_META_DELAY;
+ uint64_t now;
+ static int no_max=1;
+
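+ // Sample the current wall-clock time in microseconds; "now" is
+ // compared against the last sync / sync request timestamps below.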
+ clock_get_calendar_microtime(&secs, &usecs);
+ now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs;
+
+ //
+ // If the amount of pending write data exceeds our limit, wait
+ // for 2/3 of it to drain and then flush the journal.
+ //
+ if (hfsmp->hfs_mp->mnt_pending_write_size > hfsmp->hfs_max_pending_io) {
+ int counter=0;
+ uint64_t pending_io, start, rate = 0;
+
+ no_max = 0;
+
+ hfs_start_transaction(hfsmp); // so we hold off any new i/o's
+
+ pending_io = hfsmp->hfs_mp->mnt_pending_write_size;
+
+ clock_get_calendar_microtime(&secs, &usecs);
+ start = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs;
+
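+ // Sleep in short intervals until the backlog drops below 1/3 of
+ // its starting size, giving up after 500 tries.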
+ while(hfsmp->hfs_mp->mnt_pending_write_size > (pending_io/3) && counter++ < 500) {
+ tsleep((caddr_t)hfsmp, PRIBIO, "hfs-wait-for-io-to-drain", 10);
+ }
+
+ if (counter >= 500) {
+ printf("hfs: timed out waiting for io to drain (%lld)\n", (int64_t)hfsmp->hfs_mp->mnt_pending_write_size);
+ }
+
+ if (hfsmp->jnl) {
+ journal_flush(hfsmp->jnl);
+ } else {
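+ // No journal on this volume: fall back to a full synchronous sync instead.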
+ hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
+ }
+
+ clock_get_calendar_microtime(&secs, &usecs);
+ now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs;
+ hfsmp->hfs_last_sync_time = now;
+ if (now != start) { // guard against dividing by a zero interval
+ rate = ((pending_io * 1000000ULL) / (now - start)); // yields bytes per second
+ }
+
+ hfs_end_transaction(hfsmp);
+
+ //
+ // If a reasonable amount of time elapsed then check the
+ // i/o rate. If it's taking less than 1 second or more
+ // than 2 seconds, adjust hfs_max_pending_io so that we
+ // will allow about 1.5 seconds of i/o to queue up.
+ //
+ if (((now - start) >= 300000) && (rate != 0)) {
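+ // scale is the estimated drain time in hundredths of a second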
+ uint64_t scale = (pending_io * 100) / rate;
+
+ if (scale < 100 || scale > 200) {
+ // set it so that it should take about 1.5 seconds to drain
+ hfsmp->hfs_max_pending_io = (rate * 150ULL) / 100ULL;
+ }
+ }
+
+ } else if ( ((now - hfsmp->hfs_last_sync_time) >= 5000000ULL)
+ || (((now - hfsmp->hfs_last_sync_time) >= 100000ULL)
+ && ((now - hfsmp->hfs_last_sync_request_time) >= 100000ULL)
+ && (hfsmp->hfs_active_threads == 0)
+ && (hfsmp->hfs_global_lock_nesting == 0))) {
+
+ //
+ // Flush the journal if more than 5 seconds have elapsed since
+ // the last sync, OR if at least 100 milliseconds have passed
+ // since both the last sync and the last sync request and no
+ // one is in the middle of a transaction right now. Otherwise
+ // we defer the sync and reschedule it below.
+ //
+ if (hfsmp->jnl) {
+ lck_rw_lock_shared(&hfsmp->hfs_global_lock);
+
+ journal_flush(hfsmp->jnl);
+
+ lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
+ } else {
+ hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
+ }
+
+ clock_get_calendar_microtime(&secs, &usecs);
+ now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs;
+ hfsmp->hfs_last_sync_time = now;
+
+ } else if (hfsmp->hfs_active_threads == 0) {
+ uint64_t deadline;
+
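+ // Not enough time has passed since the last sync or sync request
+ // (or the global lock is still held), so just re-arm the syncer
+ // thread call to run again after HFS_META_DELAY.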
+ clock_interval_to_deadline(delay, HFS_MILLISEC_SCALE, &deadline);
+ thread_call_enter_delayed(hfsmp->hfs_syncer, deadline);
+
+ // note: we intentionally return early here and do not
+ // decrement the sync_scheduled and sync_incomplete
+ // variables because we rescheduled the timer.
+
+ return;
+ }
+
+ //
+ // NOTE: we decrement these *after* the journal_flush() is done, since
+ // it can take a significant amount of time and we don't want more
+ // callbacks scheduled until this one has finished.
+ //
+ OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
+ OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
+ wakeup((caddr_t)&hfsmp->hfs_sync_incomplete);
+}
+
+
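+// IOKit helper: reports whether the named block device node refers to ejectable media.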
+extern int IOBSDIsMediaEjectable( const char *cdev_name );
+