/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include <servers/bootstrap.h>
#include <sys/ioctl.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include "internal.h"
#include "firehoseServer.h" // MiG
#include "firehose_reply.h" // MiG

#if __has_feature(c_static_assert)
_Static_assert(offsetof(struct firehose_client_s, fc_mem_sent_flushed_pos)
		% 8 == 0, "Make sure atomic fields are properly aligned");
#endif

static struct firehose_server_s {
	mach_port_t fs_bootstrap_port;
	dispatch_mach_t fs_mach_channel;
	dispatch_queue_t fs_ipc_queue;
	dispatch_queue_t fs_snapshot_gate_queue;
	dispatch_queue_t fs_io_drain_queue;
	dispatch_queue_t fs_mem_drain_queue;
	firehose_handler_t fs_handler;

	firehose_snapshot_t fs_snapshot;
	bool fs_io_snapshot_started;
	bool fs_mem_snapshot_started;

	int fs_kernel_fd;
	firehose_client_t fs_kernel_client;

	TAILQ_HEAD(, firehose_client_s) fs_clients;
} server_config = {
	.fs_clients = TAILQ_HEAD_INITIALIZER(server_config.fs_clients),
	.fs_kernel_fd = -1,
};
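
// Queue roles: fs_ipc_queue serves the MiG/mach channel traffic,
// fs_mem_drain_queue and fs_io_drain_queue drain the per-client memory and
// I/O rings, and fs_snapshot_gate_queue only exists to serialize snapshot
// requests so that a single snapshot is in flight at a time.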

#pragma mark -
#pragma mark firehose client state machine

static void firehose_server_demux(firehose_client_t fc,
		mach_msg_header_t *msg_hdr);
static void firehose_client_cancel(firehose_client_t fc);
static void firehose_client_snapshot_finish(firehose_client_t fc,
		firehose_snapshot_t snapshot, bool for_io);

static void
firehose_client_notify(firehose_client_t fc, mach_port_t reply_port)
{
	firehose_push_reply_t push_reply = {
		.fpr_mem_flushed_pos = os_atomic_load2o(fc, fc_mem_flushed_pos, relaxed),
		.fpr_io_flushed_pos = os_atomic_load2o(fc, fc_io_flushed_pos, relaxed),
	};
	kern_return_t kr;

	firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos,
			push_reply.fpr_mem_flushed_pos, relaxed);
	firehose_atomic_max2o(fc, fc_io_sent_flushed_pos,
			push_reply.fpr_io_flushed_pos, relaxed);

	if (!fc->fc_pid) {
		if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) {
			dispatch_assume_zero(errno);
		}
	} else {
		if (reply_port == fc->fc_sendp) {
			kr = firehose_send_push_notify_async(reply_port, push_reply, 0);
		} else {
			kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply);
		}
		if (kr != MACH_SEND_INVALID_DEST) {
			DISPATCH_VERIFY_MIG(kr);
			dispatch_assume_zero(kr);
		}
	}
}

OS_ALWAYS_INLINE
static inline uint16_t
firehose_client_acquire_head(firehose_buffer_t fb, bool for_io)
{
	uint16_t head;
	if (for_io) {
		head = os_atomic_load2o(&fb->fb_header, fbh_ring_io_head, acquire);
	} else {
		head = os_atomic_load2o(&fb->fb_header, fbh_ring_mem_head, acquire);
	}
	return head;
}

OS_ALWAYS_INLINE
static inline void
firehose_client_push_async_merge(firehose_client_t fc, pthread_priority_t pp,
		bool for_io)
{
	if (for_io) {
		_dispatch_source_merge_data(fc->fc_io_source, pp, 1);
	} else {
		_dispatch_source_merge_data(fc->fc_mem_source, pp, 1);
	}
}

OS_NOINLINE OS_COLD
static void
firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port)
{
	// this client is really confused, do *not* answer to asyncs anymore
	fc->fc_memory_corrupted = true;
	fc->fc_use_notifs = false;

	// XXX: do not cancel the data sources or a corrupted client could
	// prevent snapshots from being taken if unlucky with ordering

	if (reply_port) {
		kern_return_t kr = firehose_send_push_reply(reply_port, 0,
				FIREHOSE_PUSH_REPLY_CORRUPTED);
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
	}
}

OS_ALWAYS_INLINE
static inline void
firehose_client_snapshot_mark_done(firehose_client_t fc,
		firehose_snapshot_t snapshot, bool for_io)
{
	if (for_io) {
		fc->fc_needs_io_snapshot = false;
	} else {
		fc->fc_needs_mem_snapshot = false;
	}
	dispatch_group_leave(snapshot->fs_group);
}

#define DRAIN_BATCH_SIZE 4
#define FIREHOSE_DRAIN_FOR_IO 0x1
#define FIREHOSE_DRAIN_POLL 0x2
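
// Notes on the drain parameters (as used by firehose_client_drain below):
// FIREHOSE_DRAIN_FOR_IO selects the I/O ring instead of the memory ring,
// FIREHOSE_DRAIN_POLL marks the inline poll of the kernel client done from a
// user-space drain, and DRAIN_BATCH_SIZE caps how many chunks a
// notification-using client is drained for in one pass so that a chatty
// client doesn't monopolize the drain queue.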

OS_NOINLINE
static void
firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
{
	firehose_buffer_t fb = fc->fc_buffer;
	firehose_chunk_t fbc;
	firehose_event_t evt;
	uint16_t volatile *fbh_ring;
	uint16_t flushed, ref, count = 0;
	uint16_t client_head, client_flushed, sent_flushed;
	firehose_snapshot_t snapshot = NULL;
	bool for_io = (flags & FIREHOSE_DRAIN_FOR_IO);

	if (for_io) {
		evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED;
		_Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED ==
				FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, "");
		fbh_ring = fb->fb_header.fbh_io_ring;
		sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos;
		flushed = (uint16_t)fc->fc_io_flushed_pos;
		if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
			snapshot = server_config.fs_snapshot;
		}
	} else {
		evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED;
		_Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED ==
				FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, "");
		fbh_ring = fb->fb_header.fbh_mem_ring;
		sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos;
		flushed = (uint16_t)fc->fc_mem_flushed_pos;
		if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
			snapshot = server_config.fs_snapshot;
		}
	}

	if (slowpath(fc->fc_memory_corrupted)) {
		goto corrupt;
	}

	client_head = flushed;
	do {
		if ((uint16_t)(flushed + count) == client_head) {
			client_head = firehose_client_acquire_head(fb, for_io);
			if ((uint16_t)(flushed + count) == client_head) {
				break;
			}
			if ((uint16_t)(client_head - sent_flushed) >=
					FIREHOSE_BUFFER_CHUNK_COUNT) {
				goto corrupt;
			}
		}

		// see firehose_buffer_ring_enqueue
		do {
			ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK;
			ref = os_atomic_load(&fbh_ring[ref], relaxed);
			ref &= FIREHOSE_RING_POS_IDX_MASK;
		} while (!fc->fc_pid && !ref);
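		// A ref of 0 means the ring slot has been reserved but the chunk
		// reference hasn't been published yet: the kernel client
		// (fc_pid == 0) is spin-waited on above, while for user-space
		// clients the slot is simply counted and skipped below.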
		count++;
		if (!ref) {
			_dispatch_debug("Ignoring invalid page reference in ring: %d", ref);
			continue;
		}

		fbc = firehose_buffer_ref_to_chunk(fb, ref);
		if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) {
			// serialize with firehose_client_metadata_stream_peek
			os_unfair_lock_lock(&fc->fc_lock);
		}
		server_config.fs_handler(fc, evt, fbc);
		if (slowpath(snapshot)) {
			snapshot->handler(fc, evt, fbc);
		}
		if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) {
			os_unfair_lock_unlock(&fc->fc_lock);
		}
		// clients not using notifications (single threaded) always drain fully
		// because they use all their limit, always
	} while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot);
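	// Note: a pending snapshot disables the batch limit above so that the
	// snapshot handler observes every chunk up to the ring head in this
	// single pass.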

	if (count) {
		// we don't load the full fbh_ring_tail because that is a 64bit quantity
		// and we only need 16bits from it. and on 32bit arm, there's no way to
		// perform an atomic load of a 64bit quantity on read-only memory.
		if (for_io) {
			os_atomic_add2o(fc, fc_io_flushed_pos, count, relaxed);
			client_flushed = os_atomic_load2o(&fb->fb_header,
					fbh_ring_tail.frp_io_flushed, relaxed);
		} else {
			os_atomic_add2o(fc, fc_mem_flushed_pos, count, relaxed);
			client_flushed = os_atomic_load2o(&fb->fb_header,
					fbh_ring_tail.frp_mem_flushed, relaxed);
		}
		if (!fc->fc_pid) {
			// will fire firehose_client_notify() because port is MACH_PORT_DEAD
			port = fc->fc_sendp;
		} else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) {
			port = fc->fc_sendp;
		}
	}

	if (slowpath(snapshot)) {
		firehose_client_snapshot_finish(fc, snapshot, for_io);
		firehose_client_snapshot_mark_done(fc, snapshot, for_io);
	}
	if (port) {
		firehose_client_notify(fc, port);
	}
	if (!fc->fc_pid) {
		if (!(flags & FIREHOSE_DRAIN_POLL)) {
			// see firehose_client_kernel_source_handle_event
			dispatch_resume(fc->fc_kernel_source);
		}
	} else {
		if (fc->fc_use_notifs && count >= DRAIN_BATCH_SIZE) {
			// if we hit the drain batch size, the client probably logs a lot
			// and there's more to drain, so optimistically schedule draining
			// again. This is cheap since the queue is hot, and is fair for
			// other clients.
			firehose_client_push_async_merge(fc, 0, for_io);
		}
		if (count && server_config.fs_kernel_client) {
			// the kernel is special because it can drop messages, so if we're
			// draining, poll the kernel each time while we're bound to a thread
			firehose_client_drain(server_config.fs_kernel_client,
					MACH_PORT_NULL, flags | FIREHOSE_DRAIN_POLL);
		}
	}
	return;

corrupt:
	if (snapshot) {
		firehose_client_snapshot_mark_done(fc, snapshot, for_io);
	}
	firehose_client_mark_corrupted(fc, port);
	// from now on all IO/mem drains depending on `for_io` will be no-op
	// (needs_<for_io>_snapshot: false, memory_corrupted: true). we can safely
	// silence the corresponding source of drain wake-ups.
	if (fc->fc_pid) {
		dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source);
	}
}

static void
firehose_client_drain_io_async(void *ctx)
{
	firehose_client_drain(ctx, MACH_PORT_NULL, FIREHOSE_DRAIN_FOR_IO);
}

static void
firehose_client_drain_mem_async(void *ctx)
{
	firehose_client_drain(ctx, MACH_PORT_NULL, 0);
}

OS_NOINLINE
static void
firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED)
{
	firehose_snapshot_t snapshot = server_config.fs_snapshot;
	firehose_buffer_t fb = fc->fc_buffer;

	dispatch_assert_queue(server_config.fs_io_drain_queue);

	// if a client dies between phase 1 and 2 of starting the snapshot
	// (see firehose_snapshot_start), there's no handler to call, but the
	// dispatch group has to be adjusted for this client going away.
	if (fc->fc_needs_io_snapshot) {
		dispatch_group_leave(snapshot->fs_group);
		fc->fc_needs_io_snapshot = false;
	}
	if (fc->fc_needs_mem_snapshot) {
		dispatch_group_leave(snapshot->fs_group);
		fc->fc_needs_mem_snapshot = false;
	}
	if (fc->fc_memory_corrupted) {
		server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CORRUPTED,
				&fb->fb_chunks[0]);
	}
	server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL);

	TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry);
	dispatch_release(fc->fc_mach_channel);
	fc->fc_mach_channel = NULL;
	fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS;
	fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS;
	_os_object_release(&fc->fc_as_os_object);
}

OS_NOINLINE
static void
firehose_client_handle_death(void *ctxt)
{
	firehose_client_t fc = ctxt;
	firehose_buffer_t fb = fc->fc_buffer;
	firehose_buffer_header_t fbh = &fb->fb_header;
	uint64_t mem_bitmap = 0, bitmap;

	if (fc->fc_memory_corrupted) {
		return firehose_client_finalize(fc);
	}

	dispatch_assert_queue(server_config.fs_io_drain_queue);

	// acquire to match release barriers from threads that died
	os_atomic_thread_fence(acquire);

	bitmap = fbh->fbh_bank.fbb_bitmap & ~1ULL;
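	// Bit 0 is masked off above because ref 0 is the chunk occupied by the
	// buffer header itself and never carries tracepoints.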
	for (int for_io = 0; for_io < 2; for_io++) {
		uint16_t volatile *fbh_ring;
		uint16_t tail, flushed;

		if (for_io) {
			fbh_ring = fbh->fbh_io_ring;
			tail = fbh->fbh_ring_tail.frp_io_tail;
			flushed = (uint16_t)fc->fc_io_flushed_pos;
		} else {
			fbh_ring = fbh->fbh_mem_ring;
			tail = fbh->fbh_ring_tail.frp_mem_tail;
			flushed = (uint16_t)fc->fc_mem_flushed_pos;
		}
		if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) {
			fc->fc_memory_corrupted = true;
			return firehose_client_finalize(fc);
		}

		// remove the pages that we flushed already from the bitmap
		for (; tail != flushed; tail++) {
			uint16_t ring_pos = tail & FIREHOSE_RING_POS_IDX_MASK;
			uint16_t ref = fbh_ring[ring_pos] & FIREHOSE_RING_POS_IDX_MASK;

			bitmap &= ~(1ULL << ref);
		}
	}

	firehose_snapshot_t snapshot = server_config.fs_snapshot;

	// Then look at all the allocated pages not seen in the ring
	while (bitmap) {
		uint16_t ref = firehose_bitmap_first_set(bitmap);
		firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
		uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;

		bitmap &= ~(1ULL << ref);
		if (fbc->fc_start + fbc_length <= fbc->fc_data) {
			// this page has its "recycle-requeue" done, but hasn't gone
			// through "recycle-reuse", or it has no data, ditch it
			continue;
		}
		if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
			// this thing has data, but the first tracepoint is unreadable
			// so also just ditch it
			continue;
		}
		if (!fbc->fc_pos.fcp_flag_io) {
			mem_bitmap |= 1ULL << ref;
			continue;
		}
		server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc);
		if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
			snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc);
		}
	}

	if (!mem_bitmap) {
		return firehose_client_finalize(fc);
	}

	dispatch_async(server_config.fs_mem_drain_queue, ^{
		uint64_t mem_bitmap_copy = mem_bitmap;

		while (mem_bitmap_copy) {
			uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy);
			firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);

			mem_bitmap_copy &= ~(1ULL << ref);
			server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc);
			if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
				snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc);
			}
		}

		dispatch_async_f(server_config.fs_io_drain_queue, fc,
				(dispatch_function_t)firehose_client_finalize);
	});
}

static void
firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason,
		dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED)
{
	mach_msg_header_t *msg_hdr;
	firehose_client_t fc = ctx;

	switch (reason) {
	case DISPATCH_MACH_MESSAGE_RECEIVED:
		msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
		if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) {
			_dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)",
					firehose_client_get_unique_pid(fc, NULL));
			dispatch_mach_cancel(fc->fc_mach_channel);
		} else {
			firehose_server_demux(fc, msg_hdr);
		}
		break;

	case DISPATCH_MACH_CANCELED:
		firehose_client_cancel(fc);
		break;
	}
}

#if !TARGET_OS_SIMULATOR
static void
firehose_client_kernel_source_handle_event(void *ctxt)
{
	firehose_client_t fc = ctxt;

	// resumed in firehose_client_drain for both memory and I/O
	dispatch_suspend(fc->fc_kernel_source);
	dispatch_suspend(fc->fc_kernel_source);
	dispatch_async_f(server_config.fs_mem_drain_queue,
			fc, firehose_client_drain_mem_async);
	dispatch_async_f(server_config.fs_io_drain_queue,
			fc, firehose_client_drain_io_async);
}
#endif

static inline void
firehose_client_resume(firehose_client_t fc,
		const struct firehose_client_connected_info_s *fcci)
{
	dispatch_assert_queue(server_config.fs_io_drain_queue);
	TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry);
	server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci);
	if (!fc->fc_pid) {
		dispatch_activate(fc->fc_kernel_source);
	} else {
		dispatch_mach_connect(fc->fc_mach_channel,
				fc->fc_recvp, fc->fc_sendp, NULL);
		dispatch_activate(fc->fc_io_source);
		dispatch_activate(fc->fc_mem_source);
	}
}

static void
firehose_client_cancel(firehose_client_t fc)
{
	dispatch_block_t block;

	_dispatch_debug("client died (unique_pid: 0x%llx)",
			firehose_client_get_unique_pid(fc, NULL));

	if (MACH_PORT_VALID(fc->fc_sendp)) {
		firehose_mach_port_send_release(fc->fc_sendp);
		fc->fc_sendp = MACH_PORT_NULL;
	}
	if (MACH_PORT_VALID(fc->fc_recvp)) {
		firehose_mach_port_recv_dispose(fc->fc_recvp, fc);
		fc->fc_recvp = MACH_PORT_NULL;
	}
	fc->fc_use_notifs = false;
	dispatch_source_cancel(fc->fc_io_source);
	dispatch_source_cancel(fc->fc_mem_source);

	block = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
		dispatch_async_f(server_config.fs_io_drain_queue, fc,
				firehose_client_handle_death);
	});
	dispatch_async(server_config.fs_mem_drain_queue, block);
	_Block_release(block);
}

static firehose_client_t
_firehose_client_create(firehose_buffer_t fb)
{
	firehose_client_t fc;

	fc = (firehose_client_t)_os_object_alloc_realized(FIREHOSE_CLIENT_CLASS,
			sizeof(struct firehose_client_s));
	fc->fc_buffer = fb;
	fc->fc_mem_flushed_pos = fb->fb_header.fbh_bank.fbb_mem_flushed;
	fc->fc_mem_sent_flushed_pos = fc->fc_mem_flushed_pos;
	fc->fc_io_flushed_pos = fb->fb_header.fbh_bank.fbb_io_flushed;
	fc->fc_io_sent_flushed_pos = fc->fc_io_flushed_pos;
	return fc;
}

#pragma pack(4)
typedef struct firehose_token_s {
	uid_t auid;
	uid_t euid;
	gid_t egid;
	uid_t ruid;
	gid_t rgid;
	pid_t pid;
	au_asid_t asid;
	dev_t execcnt;
} *firehose_token_t;
#pragma pack()
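// This layout shadows the kernel audit token: firehose_server_register()
// below casts the incoming audit_token_t to firehose_token_t to read the
// client's credentials, pid and pidversion (execcnt).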

static firehose_client_t
firehose_client_create(firehose_buffer_t fb, firehose_token_t token,
		mach_port_t comm_recvp, mach_port_t comm_sendp)
{
	uint64_t unique_pid = fb->fb_header.fbh_uniquepid;
	firehose_client_t fc = _firehose_client_create(fb);
	dispatch_mach_t dm;
	dispatch_source_t ds;

	fc->fc_pid = token->pid ? token->pid : ~0;
	fc->fc_euid = token->euid;
	fc->fc_pidversion = token->execcnt;
	ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
			server_config.fs_mem_drain_queue);
	_os_object_retain_internal_inline(&fc->fc_as_os_object);
	dispatch_set_context(ds, fc);
	dispatch_set_finalizer_f(ds,
			(dispatch_function_t)_os_object_release_internal);
	dispatch_source_set_event_handler_f(ds, firehose_client_drain_mem_async);
	fc->fc_mem_source = ds;

	ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
			server_config.fs_io_drain_queue);
	_os_object_retain_internal_inline(&fc->fc_as_os_object);
	dispatch_set_context(ds, fc);
	dispatch_set_finalizer_f(ds,
			(dispatch_function_t)_os_object_release_internal);
	dispatch_source_set_event_handler_f(ds, firehose_client_drain_io_async);
	fc->fc_io_source = ds;

	_dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid);
	fc->fc_recvp = comm_recvp;
	fc->fc_sendp = comm_sendp;
	firehose_mach_port_guard(comm_recvp, true, fc);
	dm = dispatch_mach_create_f("com.apple.firehose.peer",
			server_config.fs_ipc_queue,
			fc, firehose_client_handle_mach_event);
	fc->fc_mach_channel = dm;
	return fc;
}

static void
firehose_kernel_client_create(void)
{
#if !TARGET_OS_SIMULATOR
	struct firehose_server_s *fs = &server_config;
	firehose_buffer_map_info_t fb_map;
	firehose_client_t fc;
	dispatch_source_t ds;
	int fd;

	while ((fd = open("/dev/oslog", O_RDWR)) < 0) {
		if (errno == EINTR) {
			continue;
		}
		if (errno == ENOENT) {
			return;
		}
		DISPATCH_INTERNAL_CRASH(errno, "Unable to open /dev/oslog");
	}

	while (ioctl(fd, LOGBUFFERMAP, &fb_map) < 0) {
		if (errno == EINTR) {
			continue;
		}
		DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer");
	}
	if (fb_map.fbmi_size !=
			FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) {
		DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size");
	}

	fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr);
	ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0,
			fs->fs_ipc_queue);
	dispatch_set_context(ds, fc);
	dispatch_source_set_event_handler_f(ds,
			firehose_client_kernel_source_handle_event);
	fc->fc_kernel_source = ds;
	fc->fc_use_notifs = true;
	fc->fc_sendp = MACH_PORT_DEAD; // causes drain() to call notify

	fs->fs_kernel_fd = fd;
	fs->fs_kernel_client = fc;
#endif
}

void
_firehose_client_dispose(firehose_client_t fc)
{
	vm_deallocate(mach_task_self(), (vm_address_t)fc->fc_buffer,
			sizeof(*fc->fc_buffer));
	fc->fc_buffer = NULL;
	server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_FINALIZE, NULL);
}

void
_firehose_client_xref_dispose(firehose_client_t fc)
{
	_dispatch_debug("Cleaning up client info for unique_pid 0x%llx",
			firehose_client_get_unique_pid(fc, NULL));

	dispatch_release(fc->fc_io_source);
	fc->fc_io_source = NULL;

	dispatch_release(fc->fc_mem_source);
	fc->fc_mem_source = NULL;
}

uint64_t
firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out)
{
	firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header;
	if (pid_out) *pid_out = fc->fc_pid;
	if (!fc->fc_pid) return 0;
	return fbh->fbh_uniquepid ? fbh->fbh_uniquepid : ~0ull;
}

uid_t
firehose_client_get_euid(firehose_client_t fc)
{
	return fc->fc_euid;
}

int
firehose_client_get_pid_version(firehose_client_t fc)
{
	return fc->fc_pidversion;
}

void *
firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size)
{
	firehose_buffer_header_t fbh = &client->fc_buffer->fb_header;

	*size = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE;
	return (void *)((uintptr_t)(fbh + 1) - *size);
}
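
// The libtrace metadata area sits at the tail end of the buffer header page,
// immediately before the first data chunk, hence the pointer computed
// backwards from the end of the header above.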

void *
firehose_client_get_context(firehose_client_t fc)
{
	return os_atomic_load2o(fc, fc_ctxt, relaxed);
}

void *
firehose_client_set_context(firehose_client_t fc, void *ctxt)
{
	return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed);
}

#pragma mark -
#pragma mark firehose server

/*
 * The current_message context stores the client info for the current message
 * being handled. The only reason this works is because currently the message
 * processing is serial. If that changes, this would not work.
 */
static firehose_client_t cur_client_info;

static void
firehose_server_handle_mach_event(void *ctx OS_UNUSED,
		dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg,
		mach_error_t error OS_UNUSED)
{
	mach_msg_header_t *msg_hdr = NULL;

	if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) {
		msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
		/* TODO: Assert this should be a register message */
		firehose_server_demux(NULL, msg_hdr);
	}
}

void
firehose_server_init(mach_port_t comm_port, firehose_handler_t handler)
{
	struct firehose_server_s *fs = &server_config;
	dispatch_queue_attr_t attr;
	dispatch_mach_t dm;

	// just reference the string so that it's captured
	(void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed);

	attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL,
			QOS_CLASS_USER_INITIATED, 0);
	fs->fs_ipc_queue = dispatch_queue_create_with_target(
			"com.apple.firehose.ipc", attr, NULL);
	fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target(
			"com.apple.firehose.snapshot-gate", DISPATCH_QUEUE_SERIAL, NULL);
	fs->fs_io_drain_queue = dispatch_queue_create_with_target(
			"com.apple.firehose.drain-io", DISPATCH_QUEUE_SERIAL, NULL);
	fs->fs_mem_drain_queue = dispatch_queue_create_with_target(
			"com.apple.firehose.drain-mem", DISPATCH_QUEUE_SERIAL, NULL);

	dm = dispatch_mach_create_f("com.apple.firehose.listener",
			fs->fs_ipc_queue, NULL, firehose_server_handle_mach_event);
	fs->fs_bootstrap_port = comm_port;
	fs->fs_mach_channel = dm;
	fs->fs_handler = _Block_copy(handler);
	firehose_kernel_client_create();
}
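
// Typical bring-up, as a sketch (hypothetical caller such as a logging
// daemon; the port name is illustrative):
//
//     firehose_server_init(checkin_port, ^(firehose_client_t fc,
//             firehose_event_t evt, firehose_chunk_t chunk) {
//         // decode tracepoints from `chunk` according to `evt`
//     });
//     firehose_server_assert_spi_version(OS_FIREHOSE_SPI_VERSION);
//     firehose_server_resume();
//
// Nothing is received until firehose_server_resume() connects the listener
// channel created above.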

void
firehose_server_assert_spi_version(uint32_t spi_version)
{
	if (spi_version != OS_FIREHOSE_SPI_VERSION) {
		DISPATCH_CLIENT_CRASH(spi_version, "firehose server version mismatch ("
				OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")");
	}
	if (_firehose_spi_version != OS_FIREHOSE_SPI_VERSION) {
		DISPATCH_CLIENT_CRASH(_firehose_spi_version,
				"firehose libdispatch version mismatch ("
				OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")");
	}
}

bool
firehose_server_has_ever_flushed_pages(void)
{
	// Use the IO pages flushed count from the kernel client as an
	// approximation for whether the firehose has ever flushed pages during
	// this boot. logd uses this to detect the first time it starts after a
	// fresh boot.
	firehose_client_t fhc = server_config.fs_kernel_client;
	return !fhc || fhc->fc_io_flushed_pos > 0;
}

void
firehose_server_resume(void)
{
	struct firehose_server_s *fs = &server_config;

	if (fs->fs_kernel_client) {
		dispatch_async(fs->fs_io_drain_queue, ^{
			struct firehose_client_connected_info_s fcci = {
				.fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION,
			};
			firehose_client_resume(fs->fs_kernel_client, &fcci);
		});
	}
	dispatch_mach_connect(fs->fs_mach_channel, fs->fs_bootstrap_port,
			MACH_PORT_NULL, NULL);
}

OS_NOINLINE
static void
_firehose_server_cancel(void *ctxt OS_UNUSED)
{
	firehose_client_t fc;
	TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
		dispatch_mach_cancel(fc->fc_mach_channel);
	}
}

void
firehose_server_cancel(void)
{
	dispatch_mach_cancel(server_config.fs_mach_channel);
	dispatch_async_f(server_config.fs_io_drain_queue, NULL,
			_firehose_server_cancel);
}

dispatch_queue_t
firehose_server_copy_queue(firehose_server_queue_t which)
{
	dispatch_queue_t dq;
	switch (which) {
	case FIREHOSE_SERVER_QUEUE_IO:
		dq = server_config.fs_io_drain_queue;
		break;
	case FIREHOSE_SERVER_QUEUE_MEMORY:
		dq = server_config.fs_mem_drain_queue;
		break;
	default:
		DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
	}
	dispatch_retain(dq);
	return dq;
}

#pragma mark -
#pragma mark firehose snapshot and peeking

void
firehose_client_metadata_stream_peek(firehose_client_t fc,
		OS_UNUSED firehose_event_t context, bool (^peek_should_start)(void),
		bool (^peek)(firehose_chunk_t fbc))
{
	os_unfair_lock_lock(&fc->fc_lock);

	if (peek_should_start && peek_should_start()) {
		firehose_buffer_t fb = fc->fc_buffer;
		firehose_buffer_header_t fbh = &fb->fb_header;
		uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap;

		while (bitmap) {
			uint16_t ref = firehose_bitmap_first_set(bitmap);
			firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
			uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;

			bitmap &= ~(1ULL << ref);
			if (fbc->fc_start + fbc_length <= fbc->fc_data) {
				// this page has its "recycle-requeue" done, but hasn't gone
				// through "recycle-reuse", or it has no data, ditch it
				continue;
			}
			if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
				// this thing has data, but the first tracepoint is unreadable
				// so also just ditch it
				continue;
			}
			if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) {
				continue;
			}
			if (!peek(fbc)) {
				break;
			}
		}
	}

	os_unfair_lock_unlock(&fc->fc_lock);
}

OS_NOINLINE OS_COLD
static void
firehose_client_snapshot_finish(firehose_client_t fc,
		firehose_snapshot_t snapshot, bool for_io)
{
	firehose_buffer_t fb = fc->fc_buffer;
	firehose_buffer_header_t fbh = &fb->fb_header;
	firehose_snapshot_event_t evt;
	uint16_t volatile *fbh_ring;
	uint16_t tail, flushed;
	uint64_t bitmap;

	bitmap = ~1ULL;

	if (for_io) {
		fbh_ring = fbh->fbh_io_ring;
		tail = fbh->fbh_ring_tail.frp_io_tail;
		flushed = (uint16_t)fc->fc_io_flushed_pos;
		evt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER;
	} else {
		fbh_ring = fbh->fbh_mem_ring;
		tail = fbh->fbh_ring_tail.frp_mem_tail;
		flushed = (uint16_t)fc->fc_mem_flushed_pos;
		evt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER;
	}
	if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) {
		fc->fc_memory_corrupted = true;
		return;
	}

	// remove the pages that we flushed already from the bitmap
	for (; tail != flushed; tail++) {
		uint16_t idx = tail & FIREHOSE_RING_POS_IDX_MASK;
		uint16_t ref = fbh_ring[idx] & FIREHOSE_RING_POS_IDX_MASK;

		bitmap &= ~(1ULL << ref);
	}

	// Remove pages that are free by AND-ing with the allocating bitmap.
	// The load of fbb_bitmap may not be atomic, but it's ok because bits
	// being flipped are pages we don't care about snapshotting. The worst thing
	// that can happen is that we go peek at an unmapped page and we fault it in
	bitmap &= fbh->fbh_bank.fbb_bitmap;

	// Then look at all the allocated pages not seen in the ring
	while (bitmap) {
		uint16_t ref = firehose_bitmap_first_set(bitmap);
		firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
		uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;

		bitmap &= ~(1ULL << ref);
		if (fbc->fc_start + fbc_length <= fbc->fc_data) {
			// this page has its "recycle-requeue" done, but hasn't gone
			// through "recycle-reuse", or it has no data, ditch it
			continue;
		}
		if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
			// this thing has data, but the first tracepoint is unreadable
			// so also just ditch it
			continue;
		}
		if (fbc->fc_pos.fcp_flag_io != for_io) {
			continue;
		}
		snapshot->handler(fc, evt, fbc);
	}
}

static void
firehose_snapshot_start(void *ctxt)
{
	firehose_snapshot_t snapshot = ctxt;
	firehose_client_t fci;
	long n = 0;

	// 0. we need to be on the IO queue so that client connection and/or death
	// cannot happen concurrently
	dispatch_assert_queue(server_config.fs_io_drain_queue);
	server_config.fs_snapshot = snapshot;

	// 1. mark all the clients participating in the current snapshot
	// and enter the group for each bit set
	TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) {
		if (!fci->fc_pid) {
#if TARGET_OS_SIMULATOR
			continue;
#endif
		}
		if (slowpath(fci->fc_memory_corrupted)) {
			continue;
		}
		fci->fc_needs_io_snapshot = true;
		fci->fc_needs_mem_snapshot = true;
		n += 2;
	}
	if (n) {
		// cheating: equivalent to dispatch_group_enter() n times
		// without the acquire barriers that we don't need
		os_atomic_add2o(snapshot->fs_group, dg_value, n, relaxed);
	}

	dispatch_async(server_config.fs_mem_drain_queue, ^{
		// 2. start the fs_mem_snapshot, this is what triggers the snapshot
		// logic from _drain() or handle_death()
		server_config.fs_mem_snapshot_started = true;
		snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL);

		dispatch_async(server_config.fs_io_drain_queue, ^{
			firehose_client_t fcj;

			// 3. start the fs_io_snapshot, this is what triggers the snapshot
			// logic from _drain() or handle_death()
			// 29868879: must always happen after the memory snapshot started
			server_config.fs_io_snapshot_started = true;
			snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL);

			// match group_enter from firehose_snapshot() after MEM+IO_START
			dispatch_group_leave(snapshot->fs_group);

			// 4. tickle all the clients. the list of clients may have changed
			// since step 1, but worry not - new clients don't have
			// fc_needs_*_snapshot set so drain is harmless; clients that
			// were removed from the list have already left the group
			// (see firehose_client_finalize())
			TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) {
				if (!fcj->fc_pid) {
#if !TARGET_OS_SIMULATOR
					firehose_client_kernel_source_handle_event(fcj);
#endif
				} else {
					dispatch_source_merge_data(fcj->fc_io_source, 1);
					dispatch_source_merge_data(fcj->fc_mem_source, 1);
				}
			}
		});
	});
}

static void
firehose_snapshot_finish(void *ctxt)
{
	firehose_snapshot_t fs = ctxt;

	fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL);
	server_config.fs_snapshot = NULL;
	server_config.fs_mem_snapshot_started = false;
	server_config.fs_io_snapshot_started = false;

	dispatch_release(fs->fs_group);
	Block_release(fs->handler);
	free(fs);

	// resume the snapshot gate queue to maybe handle the next snapshot
	dispatch_resume(server_config.fs_snapshot_gate_queue);
}

static void
firehose_snapshot_gate(void *ctxt)
{
	// prevent other snapshots from running until done
	dispatch_suspend(server_config.fs_snapshot_gate_queue);
	dispatch_async_f(server_config.fs_io_drain_queue, ctxt,
			firehose_snapshot_start);
}

void
firehose_snapshot(firehose_snapshot_handler_t handler)
{
	firehose_snapshot_t snapshot = malloc(sizeof(struct firehose_snapshot_s));

	snapshot->handler = Block_copy(handler);
	snapshot->fs_group = dispatch_group_create();

	// keep the group entered until IO_START and MEM_START have been sent
	// See firehose_snapshot_start()
	dispatch_group_enter(snapshot->fs_group);
	dispatch_group_notify_f(snapshot->fs_group, server_config.fs_io_drain_queue,
			snapshot, firehose_snapshot_finish);

	dispatch_async_f(server_config.fs_snapshot_gate_queue, snapshot,
			firehose_snapshot_gate);
}
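
// Snapshot flow for an SPI consumer, as a sketch (hypothetical handler): the
// handler is first called with MEM_START and IO_START, then once per live
// chunk of each participating client, and finally with
// FIREHOSE_SNAPSHOT_EVENT_COMPLETE once the dispatch group drains:
//
//     firehose_snapshot(^(firehose_client_t fc,
//             firehose_snapshot_event_t evt, firehose_chunk_t chunk) {
//         if (evt == FIREHOSE_SNAPSHOT_EVENT_COMPLETE) { /* done */ }
//     });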

#pragma mark -
#pragma mark MiG handler routines

kern_return_t
firehose_server_register(mach_port_t server_port OS_UNUSED,
		mach_port_t mem_port, mach_vm_size_t mem_size,
		mach_port_t comm_recvp, mach_port_t comm_sendp,
		mach_port_t extra_info_port, mach_vm_size_t extra_info_size,
		audit_token_t atoken)
{
	mach_vm_address_t base_addr = 0;
	firehose_client_t fc = NULL;
	kern_return_t kr;
	struct firehose_client_connected_info_s fcci = {
		.fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION,
	};

	if (mem_size != sizeof(union firehose_buffer_u)) {
		return KERN_INVALID_VALUE;
	}

	/*
	 * Request a MACH_NOTIFY_NO_SENDERS notification for recvp. That should
	 * indicate the client going away.
	 */
	mach_port_t previous = MACH_PORT_NULL;
	kr = mach_port_request_notification(mach_task_self(), comm_recvp,
			MACH_NOTIFY_NO_SENDERS, 0, comm_recvp,
			MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
	DISPATCH_VERIFY_MIG(kr);
	if (dispatch_assume_zero(kr)) {
		return KERN_FAILURE;
	}
	dispatch_assert(previous == MACH_PORT_NULL);

	/* Map the memory handle into the server address space */
	kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0,
			VM_FLAGS_ANYWHERE, mem_port, 0, FALSE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
	DISPATCH_VERIFY_MIG(kr);
	if (dispatch_assume_zero(kr)) {
		return KERN_NO_SPACE;
	}

	if (extra_info_port && extra_info_size) {
		mach_vm_address_t addr = 0;
		kr = mach_vm_map(mach_task_self(), &addr, extra_info_size, 0,
				VM_FLAGS_ANYWHERE, extra_info_port, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
		if (dispatch_assume_zero(kr)) {
			mach_vm_deallocate(mach_task_self(), base_addr, mem_size);
			return KERN_NO_SPACE;
		}
		fcci.fcci_data = (void *)(uintptr_t)addr;
		fcci.fcci_size = (size_t)extra_info_size;
	}

	fc = firehose_client_create((firehose_buffer_t)base_addr,
			(firehose_token_t)&atoken, comm_recvp, comm_sendp);
	dispatch_async(server_config.fs_io_drain_queue, ^{
		firehose_client_resume(fc, &fcci);
		if (fcci.fcci_size) {
			vm_deallocate(mach_task_self(), (vm_address_t)fcci.fcci_data,
					fcci.fcci_size);
		}
	});

	if (extra_info_port) firehose_mach_port_send_release(extra_info_port);
	firehose_mach_port_send_release(mem_port);
	return KERN_SUCCESS;
}

kern_return_t
firehose_server_push_async(mach_port_t server_port OS_UNUSED,
		qos_class_t qos, boolean_t for_io, boolean_t expects_notifs)
{
	firehose_client_t fc = cur_client_info;
	pthread_priority_t pp = _pthread_qos_class_encode(qos, 0,
			_PTHREAD_PRIORITY_ENFORCE_FLAG);

	_dispatch_debug("FIREHOSE_PUSH_ASYNC (unique_pid %llx)",
			firehose_client_get_unique_pid(fc, NULL));
	if (!slowpath(fc->fc_memory_corrupted)) {
		if (expects_notifs && !fc->fc_use_notifs) {
			fc->fc_use_notifs = true;
		}
		firehose_client_push_async_merge(fc, pp, for_io);
	}
	return KERN_SUCCESS;
}

kern_return_t
firehose_server_push(mach_port_t server_port OS_UNUSED,
		mach_port_t reply_port, qos_class_t qos, boolean_t for_io,
		firehose_push_reply_t *push_reply OS_UNUSED)
{
	firehose_client_t fc = cur_client_info;
	dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS;
	dispatch_block_t block;
	dispatch_queue_t q;

	_dispatch_debug("FIREHOSE_PUSH (unique_pid %llx)",
			firehose_client_get_unique_pid(fc, NULL));

	if (slowpath(fc->fc_memory_corrupted)) {
		firehose_client_mark_corrupted(fc, reply_port);
		return MIG_NO_REPLY;
	}

	if (for_io) {
		q = server_config.fs_io_drain_queue;
	} else {
		q = server_config.fs_mem_drain_queue;
	}

	block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{
		firehose_client_drain(fc, reply_port,
				for_io ? FIREHOSE_DRAIN_FOR_IO : 0);
	});
	dispatch_async(q, block);
	_Block_release(block);
	return MIG_NO_REPLY;
}
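
// Returning MIG_NO_REPLY keeps the MiG reply port alive: the actual reply is
// sent later by firehose_client_notify() once the drain block has run on the
// chosen queue, or immediately with FIREHOSE_PUSH_REPLY_CORRUPTED when the
// client is marked corrupted.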

static void
firehose_server_demux(firehose_client_t fc, mach_msg_header_t *msg_hdr)
{
	const size_t reply_size =
			sizeof(union __ReplyUnion__firehose_server_firehose_subsystem);

	cur_client_info = fc;
	firehose_mig_server(firehose_server, reply_size, msg_hdr);
}