/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include <servers/bootstrap.h>
#include <sys/ioctl.h>
#include <sys/ttycom.h>

#include "firehoseServer.h" // MiG
#include "firehose_reply.h" // MiG

#if __has_feature(c_static_assert)
_Static_assert(offsetof(struct firehose_client_s, fc_mem_sent_flushed_pos)
        % 8 == 0, "Make sure atomic fields are properly aligned");
#endif

static struct firehose_server_s {
    mach_port_t         fs_bootstrap_port;
    dispatch_mach_t     fs_mach_channel;
    dispatch_queue_t    fs_ipc_queue;
    dispatch_queue_t    fs_snapshot_gate_queue;
    dispatch_queue_t    fs_io_drain_queue;
    dispatch_queue_t    fs_mem_drain_queue;
    firehose_handler_t  fs_handler;

    firehose_snapshot_t fs_snapshot;
    bool                fs_io_snapshot_started;
    bool                fs_mem_snapshot_started;

    int                 fs_kernel_fd;
    firehose_client_t   fs_kernel_client;

    TAILQ_HEAD(, firehose_client_s) fs_clients;
} server_config = {
    .fs_clients = TAILQ_HEAD_INITIALIZER(server_config.fs_clients),
};

#pragma mark firehose client state machine

static void firehose_server_demux(firehose_client_t fc,
        mach_msg_header_t *msg_hdr);
static void firehose_client_cancel(firehose_client_t fc);
static void firehose_client_snapshot_finish(firehose_client_t fc,
        firehose_snapshot_t snapshot, bool for_io);

static void
firehose_client_notify(firehose_client_t fc, mach_port_t reply_port)
{
    firehose_push_reply_t push_reply = {
        .fpr_mem_flushed_pos = os_atomic_load2o(fc, fc_mem_flushed_pos, relaxed),
        .fpr_io_flushed_pos = os_atomic_load2o(fc, fc_io_flushed_pos, relaxed),
    };
    kern_return_t kr;

    firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos,
            push_reply.fpr_mem_flushed_pos, relaxed);
    firehose_atomic_max2o(fc, fc_io_sent_flushed_pos,
            push_reply.fpr_io_flushed_pos, relaxed);

    if (fc == server_config.fs_kernel_client) {
        if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) {
            dispatch_assume_zero(errno);
        }
    } else {
        if (reply_port == fc->fc_sendp) {
            kr = firehose_send_push_notify_async(reply_port, push_reply, 0);
        } else {
            kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply);
        }
        if (kr != MACH_SEND_INVALID_DEST) {
            DISPATCH_VERIFY_MIG(kr);
            dispatch_assume_zero(kr);
        }
    }
}

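/*
 * Note on the atomic max above (inferred from the code, not original
 * documentation): drains for the same client may complete out of order, so
 * a reply publishing an older position can run after one publishing a newer
 * position. A plain store could move *_sent_flushed_pos backwards (e.g. 9
 * then 7), while max(9, 7) keeps the monotonically increasing 9.
 */
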
static inline uint16_t
firehose_client_acquire_head(firehose_buffer_t fb, bool for_io)
{
    uint16_t head;

    if (for_io) {
        head = os_atomic_load2o(&fb->fb_header, fbh_ring_io_head, acquire);
    } else {
        head = os_atomic_load2o(&fb->fb_header, fbh_ring_mem_head, acquire);
    }
    return head;
}

static void
firehose_client_push_async_merge(firehose_client_t fc, pthread_priority_t pp,
        bool for_io)
{
    if (for_io) {
        _dispatch_source_merge_data(fc->fc_io_source, pp, 1);
    } else {
        _dispatch_source_merge_data(fc->fc_mem_source, pp, 1);
    }
}

static void
firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port)
{
    // this client is really confused, do *not* answer to asyncs anymore
    fc->fc_memory_corrupted = true;
    fc->fc_use_notifs = false;

    // XXX: do not cancel the data sources or a corrupted client could
    // prevent snapshots from being taken if unlucky with ordering

    if (reply_port) {
        kern_return_t kr = firehose_send_push_reply(reply_port, 0,
                FIREHOSE_PUSH_REPLY_CORRUPTED);
        DISPATCH_VERIFY_MIG(kr);
        dispatch_assume_zero(kr);
    }
}

static void
firehose_client_snapshot_mark_done(firehose_client_t fc,
        firehose_snapshot_t snapshot, bool for_io)
{
    if (for_io) {
        fc->fc_needs_io_snapshot = false;
    } else {
        fc->fc_needs_mem_snapshot = false;
    }
    dispatch_group_leave(snapshot->fs_group);
}

#define DRAIN_BATCH_SIZE      4
#define FIREHOSE_DRAIN_FOR_IO 0x1
#define FIREHOSE_DRAIN_POLL   0x2

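/*
 * How these combine (illustrative sketch, inferred from the code below): the
 * FIREHOSE_DRAIN_* bits form a mask, e.g. polling the kernel while draining
 * I/O passes FIREHOSE_DRAIN_FOR_IO | FIREHOSE_DRAIN_POLL. DRAIN_BATCH_SIZE
 * bounds how many chunks a notification-based client drains per pass: a
 * client with 10 pending chunks is drained in batches of 4, 4 and 2, the
 * drain being rescheduled between batches so other clients sharing the
 * queue are not starved.
 */
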
static void
firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
{
    firehose_buffer_t fb = fc->fc_buffer;
    firehose_chunk_t fbc;
    firehose_event_t evt;
    uint16_t volatile *fbh_ring;
    uint16_t flushed, ref, count = 0;
    uint16_t client_head, client_flushed, sent_flushed;
    firehose_snapshot_t snapshot = NULL;
    bool for_io = (flags & FIREHOSE_DRAIN_FOR_IO);

    if (for_io) {
        evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED;
        _Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED ==
                FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, "");
        fbh_ring = fb->fb_header.fbh_io_ring;
        sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos;
        flushed = (uint16_t)fc->fc_io_flushed_pos;
        if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
            snapshot = server_config.fs_snapshot;
        }
    } else {
        evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED;
        _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED ==
                FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, "");
        fbh_ring = fb->fb_header.fbh_mem_ring;
        sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos;
        flushed = (uint16_t)fc->fc_mem_flushed_pos;
        if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
            snapshot = server_config.fs_snapshot;
        }
    }

    if (slowpath(fc->fc_memory_corrupted)) {
        goto corrupt;
    }

    client_head = flushed;
    do {
        if ((uint16_t)(flushed + count) == client_head) {
            client_head = firehose_client_acquire_head(fb, for_io);
            if ((uint16_t)(flushed + count) == client_head) {
                // nothing left to drain
                break;
            }
            if ((uint16_t)(client_head - sent_flushed) >=
                    FIREHOSE_BUFFER_CHUNK_COUNT) {
                goto corrupt;
            }
        }

        // see firehose_buffer_ring_enqueue
        do {
            ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK;
            ref = os_atomic_load(&fbh_ring[ref], relaxed);
            ref &= FIREHOSE_RING_POS_IDX_MASK;
        } while (!fc->fc_pid && !ref);
        count++;
        if (!ref) {
            _dispatch_debug("Ignoring invalid page reference in ring: %d", ref);
            continue;
        }

        fbc = firehose_buffer_ref_to_chunk(fb, ref);
        if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) {
            // serialize with firehose_client_metadata_stream_peek
            os_unfair_lock_lock(&fc->fc_lock);
        }
        server_config.fs_handler(fc, evt, fbc);
        if (slowpath(snapshot)) {
            snapshot->handler(fc, evt, fbc);
        }
        if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) {
            os_unfair_lock_unlock(&fc->fc_lock);
        }
        // clients not using notifications (single threaded) always drain fully
        // because they use all their limit, always
    } while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot);

    if (count) {
        // we don't load the full fbh_ring_tail because that is a 64bit quantity
        // and we only need 16bits from it. and on 32bit arm, there's no way to
        // perform an atomic load of a 64bit quantity on read-only memory.
        if (for_io) {
            os_atomic_add2o(fc, fc_io_flushed_pos, count, relaxed);
            client_flushed = os_atomic_load2o(&fb->fb_header,
                    fbh_ring_tail.frp_io_flushed, relaxed);
        } else {
            os_atomic_add2o(fc, fc_mem_flushed_pos, count, relaxed);
            client_flushed = os_atomic_load2o(&fb->fb_header,
                    fbh_ring_tail.frp_mem_flushed, relaxed);
        }
        if (!fc->fc_pid) {
            // will fire firehose_client_notify() because port is MACH_PORT_DEAD
            port = fc->fc_sendp;
        } else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) {
            port = fc->fc_sendp;
        }
    }

    if (slowpath(snapshot)) {
        firehose_client_snapshot_finish(fc, snapshot, for_io);
        firehose_client_snapshot_mark_done(fc, snapshot, for_io);
    }
    if (port) {
        firehose_client_notify(fc, port);
    }
    if (!fc->fc_pid) {
        if (!(flags & FIREHOSE_DRAIN_POLL)) {
            // see firehose_client_kernel_source_handle_event
            dispatch_resume(fc->fc_kernel_source);
        }
    } else {
        if (fc->fc_use_notifs && count >= DRAIN_BATCH_SIZE) {
            // if we hit the drain batch size, the client probably logs a lot
            // and there's more to drain, so optimistically schedule draining
            // again this is cheap since the queue is hot, and is fair for other
            // clients
            firehose_client_push_async_merge(fc, 0, for_io);
        }
        if (count && server_config.fs_kernel_client) {
            // the kernel is special because it can drop messages, so if we're
            // draining, poll the kernel each time while we're bound to a thread
            firehose_client_drain(server_config.fs_kernel_client,
                    MACH_PORT_NULL, flags | FIREHOSE_DRAIN_POLL);
        }
    }
    return;

corrupt:
    if (snapshot) {
        firehose_client_snapshot_mark_done(fc, snapshot, for_io);
    }
    firehose_client_mark_corrupted(fc, port);
    // from now on all IO/mem drains depending on `for_io` will be no-op
    // (needs_<for_io>_snapshot: false, memory_corrupted: true). we can safely
    // silence the corresponding source of drain wake-ups.
    if (fc->fc_pid) {
        dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source);
    }
}

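/*
 * Worked example of the 16-bit ring arithmetic used above (illustrative):
 * positions are free-running 16-bit counters, so distances are computed with
 * unsigned 16-bit subtraction. With sent_flushed == 0xfff0 and
 * client_head == 0x0010, (uint16_t)(0x0010 - 0xfff0) == 0x0020, i.e. a
 * distance of 32 chunks measured correctly across the wraparound. A distance
 * of FIREHOSE_BUFFER_CHUNK_COUNT or more is impossible for a well-behaved
 * client, which is what the `goto corrupt` checks exploit.
 */
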
static void
firehose_client_drain_io_async(void *ctx)
{
    firehose_client_drain(ctx, MACH_PORT_NULL, FIREHOSE_DRAIN_FOR_IO);
}

static void
firehose_client_drain_mem_async(void *ctx)
{
    firehose_client_drain(ctx, MACH_PORT_NULL, 0);
}

static void
firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED)
{
    firehose_snapshot_t snapshot = server_config.fs_snapshot;
    firehose_buffer_t fb = fc->fc_buffer;

    dispatch_assert_queue(server_config.fs_io_drain_queue);

    // if a client dies between phase 1 and 2 of starting the snapshot
    // (see firehose_snapshot_start) there's no handler to call, but the
    // dispatch group has to be adjusted for this client going away.
    if (fc->fc_needs_io_snapshot) {
        dispatch_group_leave(snapshot->fs_group);
        fc->fc_needs_io_snapshot = false;
    }
    if (fc->fc_needs_mem_snapshot) {
        dispatch_group_leave(snapshot->fs_group);
        fc->fc_needs_mem_snapshot = false;
    }
    if (fc->fc_memory_corrupted) {
        server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CORRUPTED,
                &fb->fb_chunks[0]);
    }
    server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL);

    TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry);
    dispatch_release(fc->fc_mach_channel);
    fc->fc_mach_channel = NULL;
    fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS;
    fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS;
    _os_object_release(&fc->fc_as_os_object);
}

static void
firehose_client_handle_death(void *ctxt)
{
    firehose_client_t fc = ctxt;
    firehose_buffer_t fb = fc->fc_buffer;
    firehose_buffer_header_t fbh = &fb->fb_header;
    uint64_t mem_bitmap = 0, bitmap;

    if (fc->fc_memory_corrupted) {
        return firehose_client_finalize(fc);
    }

    dispatch_assert_queue(server_config.fs_io_drain_queue);

    // acquire to match release barriers from threads that died
    os_atomic_thread_fence(acquire);

    bitmap = fbh->fbh_bank.fbb_bitmap & ~1ULL;
    for (int for_io = 0; for_io < 2; for_io++) {
        uint16_t volatile *fbh_ring;
        uint16_t tail, flushed;

        if (for_io) {
            fbh_ring = fbh->fbh_io_ring;
            tail = fbh->fbh_ring_tail.frp_io_tail;
            flushed = (uint16_t)fc->fc_io_flushed_pos;
        } else {
            fbh_ring = fbh->fbh_mem_ring;
            tail = fbh->fbh_ring_tail.frp_mem_tail;
            flushed = (uint16_t)fc->fc_mem_flushed_pos;
        }
        if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) {
            fc->fc_memory_corrupted = true;
            return firehose_client_finalize(fc);
        }

        // remove the pages that we flushed already from the bitmap
        for (; tail != flushed; tail++) {
            uint16_t ring_pos = tail & FIREHOSE_RING_POS_IDX_MASK;
            uint16_t ref = fbh_ring[ring_pos] & FIREHOSE_RING_POS_IDX_MASK;

            bitmap &= ~(1ULL << ref);
        }
    }

    firehose_snapshot_t snapshot = server_config.fs_snapshot;

    // Then look at all the allocated pages not seen in the ring
    while (bitmap) {
        uint16_t ref = firehose_bitmap_first_set(bitmap);
        firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
        uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;

        bitmap &= ~(1ULL << ref);
        if (fbc->fc_start + fbc_length <= fbc->fc_data) {
            // this page has its "recycle-requeue" done, but hasn't gone
            // through "recycle-reuse", or it has no data, ditch it
            continue;
        }
        if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
            // this thing has data, but the first tracepoint is unreadable
            // so also just ditch it
            continue;
        }
        if (!fbc->fc_pos.fcp_flag_io) {
            mem_bitmap |= 1ULL << ref;
            continue;
        }
        server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc);
        if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
            snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc);
        }
    }

    if (!mem_bitmap) {
        return firehose_client_finalize(fc);
    }

    dispatch_async(server_config.fs_mem_drain_queue, ^{
        uint64_t mem_bitmap_copy = mem_bitmap;

        while (mem_bitmap_copy) {
            uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy);
            firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);

            mem_bitmap_copy &= ~(1ULL << ref);
            server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc);
            if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
                snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc);
            }
        }

        dispatch_async_f(server_config.fs_io_drain_queue, fc,
                (dispatch_function_t)firehose_client_finalize);
    });
}

static void
firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason,
        dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED)
{
    mach_msg_header_t *msg_hdr;
    firehose_client_t fc = ctx;

    switch (reason) {
    case DISPATCH_MACH_MESSAGE_RECEIVED:
        msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
        if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) {
            _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)",
                    firehose_client_get_unique_pid(fc, NULL));
            dispatch_mach_cancel(fc->fc_mach_channel);
        } else {
            firehose_server_demux(fc, msg_hdr);
        }
        break;

    case DISPATCH_MACH_CANCELED:
        firehose_client_cancel(fc);
        break;
    }
}

#if !TARGET_OS_SIMULATOR
static void
firehose_client_kernel_source_handle_event(void *ctxt)
{
    firehose_client_t fc = ctxt;

    // resumed in firehose_client_drain for both memory and I/O
    dispatch_suspend(fc->fc_kernel_source);
    dispatch_suspend(fc->fc_kernel_source);
    dispatch_async_f(server_config.fs_mem_drain_queue,
            fc, firehose_client_drain_mem_async);
    dispatch_async_f(server_config.fs_io_drain_queue,
            fc, firehose_client_drain_io_async);
}
#endif

static void
firehose_client_resume(firehose_client_t fc,
        const struct firehose_client_connected_info_s *fcci)
{
    dispatch_assert_queue(server_config.fs_io_drain_queue);

    TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry);
    server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci);
    if (!fc->fc_pid) {
        dispatch_activate(fc->fc_kernel_source);
    } else {
        dispatch_mach_connect(fc->fc_mach_channel,
                fc->fc_recvp, fc->fc_sendp, NULL);
        dispatch_activate(fc->fc_io_source);
        dispatch_activate(fc->fc_mem_source);
    }
}

static void
firehose_client_cancel(firehose_client_t fc)
{
    dispatch_block_t block;

    _dispatch_debug("client died (unique_pid: 0x%llx)",
            firehose_client_get_unique_pid(fc, NULL));

    if (MACH_PORT_VALID(fc->fc_sendp)) {
        firehose_mach_port_send_release(fc->fc_sendp);
        fc->fc_sendp = MACH_PORT_NULL;
    }
    if (MACH_PORT_VALID(fc->fc_recvp)) {
        firehose_mach_port_recv_dispose(fc->fc_recvp, fc);
        fc->fc_recvp = MACH_PORT_NULL;
    }
    fc->fc_use_notifs = false;
    dispatch_source_cancel(fc->fc_io_source);
    dispatch_source_cancel(fc->fc_mem_source);

    block = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
        dispatch_async_f(server_config.fs_io_drain_queue, fc,
                firehose_client_handle_death);
    });
    dispatch_async(server_config.fs_mem_drain_queue, block);
    _Block_release(block);
}

static firehose_client_t
_firehose_client_create(firehose_buffer_t fb)
{
    firehose_client_t fc;

    fc = (firehose_client_t)_os_object_alloc_realized(FIREHOSE_CLIENT_CLASS,
            sizeof(struct firehose_client_s));
    fc->fc_buffer = fb;
    fc->fc_mem_flushed_pos = fb->fb_header.fbh_bank.fbb_mem_flushed;
    fc->fc_mem_sent_flushed_pos = fc->fc_mem_flushed_pos;
    fc->fc_io_flushed_pos = fb->fb_header.fbh_bank.fbb_io_flushed;
    fc->fc_io_sent_flushed_pos = fc->fc_io_flushed_pos;
    return fc;
}

typedef struct firehose_token_s {
    // layout must mirror audit_token_t, which is cast to this type in
    // firehose_server_register(); only euid, pid and execcnt are read here
    uid_t auid;
    uid_t euid;
    gid_t egid;
    uid_t ruid;
    gid_t rgid;
    pid_t pid;
    au_asid_t asid;
    int execcnt;
} *firehose_token_t;

static firehose_client_t
firehose_client_create(firehose_buffer_t fb, firehose_token_t token,
        mach_port_t comm_recvp, mach_port_t comm_sendp)
{
    uint64_t unique_pid = fb->fb_header.fbh_uniquepid;
    firehose_client_t fc = _firehose_client_create(fb);
    dispatch_mach_t dm;
    dispatch_source_t ds;

    fc->fc_pid = token->pid ? token->pid : ~0;
    fc->fc_euid = token->euid;
    fc->fc_pidversion = token->execcnt;

    ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
            server_config.fs_mem_drain_queue);
    _os_object_retain_internal_inline(&fc->fc_as_os_object);
    dispatch_set_context(ds, fc);
    dispatch_set_finalizer_f(ds,
            (dispatch_function_t)_os_object_release_internal);
    dispatch_source_set_event_handler_f(ds, firehose_client_drain_mem_async);
    fc->fc_mem_source = ds;

    ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
            server_config.fs_io_drain_queue);
    _os_object_retain_internal_inline(&fc->fc_as_os_object);
    dispatch_set_context(ds, fc);
    dispatch_set_finalizer_f(ds,
            (dispatch_function_t)_os_object_release_internal);
    dispatch_source_set_event_handler_f(ds, firehose_client_drain_io_async);
    fc->fc_io_source = ds;

    _dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid);
    fc->fc_recvp = comm_recvp;
    fc->fc_sendp = comm_sendp;
    firehose_mach_port_guard(comm_recvp, true, fc);
    dm = dispatch_mach_create_f("com.apple.firehose.peer",
            server_config.fs_ipc_queue,
            fc, firehose_client_handle_mach_event);
    fc->fc_mach_channel = dm;
    return fc;
}

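/*
 * Design note (illustrative): DISPATCH_SOURCE_TYPE_DATA_OR coalesces
 * wake-ups. Any number of dispatch_source_merge_data() calls between two
 * handler invocations produce a single invocation whose data is the OR of
 * the merged values, so a burst of pushes from one client collapses into a
 * single drain pass instead of queueing one drain per message. The manual
 * retain paired with the _os_object_release_internal finalizer keeps the
 * client object alive for as long as either source can still fire.
 */
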
static void
firehose_kernel_client_create(void)
{
#if !TARGET_OS_SIMULATOR
    struct firehose_server_s *fs = &server_config;
    firehose_buffer_map_info_t fb_map;
    firehose_client_t fc;
    dispatch_source_t ds;
    int fd;

    while ((fd = open("/dev/oslog", O_RDWR)) < 0) {
        if (errno == EINTR) {
            continue;
        }
        if (errno == ENOENT) {
            return;
        }
        DISPATCH_INTERNAL_CRASH(errno, "Unable to open /dev/oslog");
    }

    while (ioctl(fd, LOGBUFFERMAP, &fb_map) < 0) {
        if (errno == EINTR) {
            continue;
        }
        DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer");
    }
    if (fb_map.fbmi_size !=
            FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) {
        DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size");
    }

    fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr);
    ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0,
            fs->fs_ipc_queue);
    dispatch_set_context(ds, fc);
    dispatch_source_set_event_handler_f(ds,
            firehose_client_kernel_source_handle_event);
    fc->fc_kernel_source = ds;
    fc->fc_use_notifs = true;
    fc->fc_sendp = MACH_PORT_DEAD; // causes drain() to call notify

    fs->fs_kernel_fd = fd;
    fs->fs_kernel_client = fc;
#endif
}

void
_firehose_client_dispose(firehose_client_t fc)
{
    vm_deallocate(mach_task_self(), (vm_address_t)fc->fc_buffer,
            sizeof(*fc->fc_buffer));
    fc->fc_buffer = NULL;
    server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_FINALIZE, NULL);
}

void
_firehose_client_xref_dispose(firehose_client_t fc)
{
    _dispatch_debug("Cleaning up client info for unique_pid 0x%llx",
            firehose_client_get_unique_pid(fc, NULL));

    dispatch_release(fc->fc_io_source);
    fc->fc_io_source = NULL;

    dispatch_release(fc->fc_mem_source);
    fc->fc_mem_source = NULL;
}

uint64_t
firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out)
{
    firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header;

    if (pid_out) *pid_out = fc->fc_pid;
    if (!fc->fc_pid) return 0;
    return fbh->fbh_uniquepid ? fbh->fbh_uniquepid : ~0ull;
}

uid_t
firehose_client_get_euid(firehose_client_t fc)
{
    return fc->fc_euid;
}

int
firehose_client_get_pid_version(firehose_client_t fc)
{
    return fc->fc_pidversion;
}

void *
firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size)
{
    firehose_buffer_header_t fbh = &client->fc_buffer->fb_header;

    *size = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE;
    return (void *)((uintptr_t)(fbh + 1) - *size);
}

void *
firehose_client_get_context(firehose_client_t fc)
{
    return os_atomic_load2o(fc, fc_ctxt, relaxed);
}

void *
firehose_client_set_context(firehose_client_t fc, void *ctxt)
{
    return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed);
}

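/*
 * Usage sketch (illustrative; my_state_create/my_state_destroy are
 * placeholders): servers typically attach per-client state when a client
 * connects and read it back in subsequent events:
 *
 *     case FIREHOSE_EVENT_CLIENT_CONNECTED:
 *         firehose_client_set_context(fc, my_state_create(fc));
 *         break;
 *     case FIREHOSE_EVENT_CLIENT_DIED:
 *         my_state_destroy(firehose_client_get_context(fc));
 *         break;
 */
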
#pragma mark firehose server

/*
 * The current_message context stores the client info for the current message
 * being handled. The only reason this works is that message processing is
 * currently serial; if that ever changes, this scheme breaks.
 */
static firehose_client_t cur_client_info;

static void
firehose_server_handle_mach_event(void *ctx OS_UNUSED,
        dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg,
        mach_error_t error OS_UNUSED)
{
    mach_msg_header_t *msg_hdr = NULL;

    if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) {
        msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
        /* TODO: assert that this is a register message */
        firehose_server_demux(NULL, msg_hdr);
    }
}

void
firehose_server_init(mach_port_t comm_port, firehose_handler_t handler)
{
    struct firehose_server_s *fs = &server_config;
    dispatch_queue_attr_t attr;
    dispatch_mach_t dm;

    // just reference the string so that it's captured
    (void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed);

    attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL,
            QOS_CLASS_USER_INITIATED, 0);
    fs->fs_ipc_queue = dispatch_queue_create_with_target(
            "com.apple.firehose.ipc", attr, NULL);
    fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target(
            "com.apple.firehose.snapshot-gate", DISPATCH_QUEUE_SERIAL, NULL);
    fs->fs_io_drain_queue = dispatch_queue_create_with_target(
            "com.apple.firehose.drain-io", DISPATCH_QUEUE_SERIAL, NULL);
    fs->fs_mem_drain_queue = dispatch_queue_create_with_target(
            "com.apple.firehose.drain-mem", DISPATCH_QUEUE_SERIAL, NULL);

    dm = dispatch_mach_create_f("com.apple.firehose.listener",
            fs->fs_ipc_queue, NULL, firehose_server_handle_mach_event);
    fs->fs_bootstrap_port = comm_port;
    fs->fs_mach_channel = dm;
    fs->fs_handler = _Block_copy(handler);
    firehose_kernel_client_create();
}

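/*
 * Bring-up sketch (illustrative): a daemon embedding this server obtains its
 * service port, installs a handler, then resumes the server. Nothing is
 * delivered until firehose_server_resume() connects the listener channel:
 *
 *     firehose_server_init(comm_port, ^(firehose_client_t fc,
 *             firehose_event_t event, firehose_chunk_t page) {
 *         // consume `page` according to `event`
 *     });
 *     firehose_server_resume();
 */
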
void
firehose_server_assert_spi_version(uint32_t spi_version)
{
    if (spi_version != OS_FIREHOSE_SPI_VERSION) {
        DISPATCH_CLIENT_CRASH(spi_version, "firehose server version mismatch ("
                OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")");
    }
    if (_firehose_spi_version != OS_FIREHOSE_SPI_VERSION) {
        DISPATCH_CLIENT_CRASH(_firehose_spi_version,
                "firehose libdispatch version mismatch ("
                OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")");
    }
}

bool
firehose_server_has_ever_flushed_pages(void)
{
    // Use the IO pages flushed count from the kernel client as an
    // approximation for whether the firehose has ever flushed pages during
    // this boot. logd uses this to detect the first time it starts after a
    // reboot.
    firehose_client_t fhc = server_config.fs_kernel_client;
    return !fhc || fhc->fc_io_flushed_pos > 0;
}

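/*
 * Usage sketch (illustrative):
 *
 *     if (!firehose_server_has_ever_flushed_pages()) {
 *         // first run since boot: perform one-time setup
 *     }
 */
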
void
firehose_server_resume(void)
{
    struct firehose_server_s *fs = &server_config;

    if (fs->fs_kernel_client) {
        dispatch_async(fs->fs_io_drain_queue, ^{
            struct firehose_client_connected_info_s fcci = {
                .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION,
            };
            firehose_client_resume(fs->fs_kernel_client, &fcci);
        });
    }
    dispatch_mach_connect(fs->fs_mach_channel, fs->fs_bootstrap_port,
            MACH_PORT_NULL, NULL);
}

static void
_firehose_server_cancel(void *ctxt OS_UNUSED)
{
    firehose_client_t fc;

    TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
        dispatch_mach_cancel(fc->fc_mach_channel);
    }
}

void
firehose_server_cancel(void)
{
    dispatch_mach_cancel(server_config.fs_mach_channel);
    dispatch_async_f(server_config.fs_io_drain_queue, NULL,
            _firehose_server_cancel);
}

dispatch_queue_t
firehose_server_copy_queue(firehose_server_queue_t which)
{
    dispatch_queue_t dq;

    switch (which) {
    case FIREHOSE_SERVER_QUEUE_IO:
        dq = server_config.fs_io_drain_queue;
        break;
    case FIREHOSE_SERVER_QUEUE_MEMORY:
        dq = server_config.fs_mem_drain_queue;
        break;
    default:
        DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
    }
    dispatch_retain(dq);
    return dq;
}

#pragma mark firehose snapshot and peeking

void
firehose_client_metadata_stream_peek(firehose_client_t fc,
        OS_UNUSED firehose_event_t context, bool (^peek_should_start)(void),
        bool (^peek)(firehose_chunk_t fbc))
{
    os_unfair_lock_lock(&fc->fc_lock);

    if (peek_should_start && peek_should_start()) {
        firehose_buffer_t fb = fc->fc_buffer;
        firehose_buffer_header_t fbh = &fb->fb_header;
        uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap;

        while (bitmap) {
            uint16_t ref = firehose_bitmap_first_set(bitmap);
            firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
            uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;

            bitmap &= ~(1ULL << ref);
            if (fbc->fc_start + fbc_length <= fbc->fc_data) {
                // this page has its "recycle-requeue" done, but hasn't gone
                // through "recycle-reuse", or it has no data, ditch it
                continue;
            }
            if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
                // this thing has data, but the first tracepoint is unreadable
                // so also just ditch it
                continue;
            }
            if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) {
                continue;
            }
            if (!peek(fbc)) {
                break;
            }
        }
    }

    os_unfair_lock_unlock(&fc->fc_lock);
}

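/*
 * Usage sketch (illustrative): the first block decides, under fc_lock,
 * whether to walk the metadata stream at all; the second is called once per
 * live metadata chunk and returns false to stop the walk early:
 *
 *     firehose_client_metadata_stream_peek(fc, event,
 *             ^bool { return should_walk; },
 *             ^bool (firehose_chunk_t fbc) {
 *         // inspect fbc
 *         return true;
 *     });
 *
 * Holding fc_lock across both blocks serializes the peek against concurrent
 * drains of the metadata stream (see firehose_client_drain()).
 */
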
static void
firehose_client_snapshot_finish(firehose_client_t fc,
        firehose_snapshot_t snapshot, bool for_io)
{
    firehose_buffer_t fb = fc->fc_buffer;
    firehose_buffer_header_t fbh = &fb->fb_header;
    firehose_snapshot_event_t evt;
    uint16_t volatile *fbh_ring;
    uint16_t tail, flushed;
    uint64_t bitmap;

    bitmap = ~1ULL;

    if (for_io) {
        fbh_ring = fbh->fbh_io_ring;
        tail = fbh->fbh_ring_tail.frp_io_tail;
        flushed = (uint16_t)fc->fc_io_flushed_pos;
        evt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER;
    } else {
        fbh_ring = fbh->fbh_mem_ring;
        tail = fbh->fbh_ring_tail.frp_mem_tail;
        flushed = (uint16_t)fc->fc_mem_flushed_pos;
        evt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER;
    }
    if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) {
        fc->fc_memory_corrupted = true;
        return;
    }

    // remove the pages that we flushed already from the bitmap
    for (; tail != flushed; tail++) {
        uint16_t idx = tail & FIREHOSE_RING_POS_IDX_MASK;
        uint16_t ref = fbh_ring[idx] & FIREHOSE_RING_POS_IDX_MASK;

        bitmap &= ~(1ULL << ref);
    }

    // Remove pages that are free by AND-ing with the allocating bitmap.
    // The load of fbb_bitmap may not be atomic, but it's ok because bits
    // being flipped are pages we don't care about snapshotting. The worst thing
    // that can happen is that we go peek at an unmapped page and we fault it in
    bitmap &= fbh->fbh_bank.fbb_bitmap;

    // Then look at all the allocated pages not seen in the ring
    while (bitmap) {
        uint16_t ref = firehose_bitmap_first_set(bitmap);
        firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
        uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;

        bitmap &= ~(1ULL << ref);
        if (fbc->fc_start + fbc_length <= fbc->fc_data) {
            // this page has its "recycle-requeue" done, but hasn't gone
            // through "recycle-reuse", or it has no data, ditch it
            continue;
        }
        if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
            // this thing has data, but the first tracepoint is unreadable
            // so also just ditch it
            continue;
        }
        if (fbc->fc_pos.fcp_flag_io != for_io) {
            continue;
        }
        snapshot->handler(fc, evt, fbc);
    }
}

static void
firehose_snapshot_start(void *ctxt)
{
    firehose_snapshot_t snapshot = ctxt;
    firehose_client_t fci;
    long n = 0;

    // 0. we need to be on the IO queue so that client connection and/or death
    //    cannot happen concurrently
    dispatch_assert_queue(server_config.fs_io_drain_queue);
    server_config.fs_snapshot = snapshot;

    // 1. mark all the clients participating in the current snapshot
    //    and enter the group for each bit set
    TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) {
        if (!fci->fc_pid) {
#if TARGET_OS_SIMULATOR
            continue;
#endif
        } else if (slowpath(fci->fc_memory_corrupted)) {
            continue;
        }
        fci->fc_needs_io_snapshot = true;
        fci->fc_needs_mem_snapshot = true;
        n += 2;
    }
    if (n) {
        // cheating: equivalent to dispatch_group_enter() n times
        // without the acquire barriers that we don't need
        os_atomic_add2o(snapshot->fs_group, dg_value, n, relaxed);
    }

    dispatch_async(server_config.fs_mem_drain_queue, ^{
        // 2. start the fs_mem_snapshot, this is what triggers the snapshot
        //    logic from _drain() or handle_death()
        server_config.fs_mem_snapshot_started = true;
        snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL);

        dispatch_async(server_config.fs_io_drain_queue, ^{
            firehose_client_t fcj;

            // 3. start the fs_io_snapshot, this is what triggers the snapshot
            //    logic from _drain() or handle_death()
            //    29868879: must always happen after the memory snapshot started
            server_config.fs_io_snapshot_started = true;
            snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL);

            // match group_enter from firehose_snapshot() after MEM+IO_START
            dispatch_group_leave(snapshot->fs_group);

            // 4. tickle all the clients. the list of clients may have changed
            //    since step 1, but worry not - new clients don't have
            //    fc_needs_*_snapshot set so drain is harmless; clients that
            //    were removed from the list have already left the group
            //    (see firehose_client_finalize())
            TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) {
                if (!fcj->fc_pid) {
#if !TARGET_OS_SIMULATOR
                    firehose_client_kernel_source_handle_event(fcj);
#endif
                } else {
                    dispatch_source_merge_data(fcj->fc_io_source, 1);
                    dispatch_source_merge_data(fcj->fc_mem_source, 1);
                }
            }
        });
    });
}

static void
firehose_snapshot_finish(void *ctxt)
{
    firehose_snapshot_t fs = ctxt;

    fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL);
    server_config.fs_snapshot = NULL;
    server_config.fs_mem_snapshot_started = false;
    server_config.fs_io_snapshot_started = false;

    dispatch_release(fs->fs_group);
    Block_release(fs->handler);
    free(fs);

    // resume the snapshot gate queue to maybe handle the next snapshot
    dispatch_resume(server_config.fs_snapshot_gate_queue);
}

static void
firehose_snapshot_gate(void *ctxt)
{
    // prevent other snapshots from running until done
    dispatch_suspend(server_config.fs_snapshot_gate_queue);
    dispatch_async_f(server_config.fs_io_drain_queue, ctxt,
            firehose_snapshot_start);
}

void
firehose_snapshot(firehose_snapshot_handler_t handler)
{
    firehose_snapshot_t snapshot = malloc(sizeof(struct firehose_snapshot_s));

    snapshot->handler = Block_copy(handler);
    snapshot->fs_group = dispatch_group_create();

    // keep the group entered until IO_START and MEM_START have been sent
    // See firehose_snapshot_start()
    dispatch_group_enter(snapshot->fs_group);
    dispatch_group_notify_f(snapshot->fs_group, server_config.fs_io_drain_queue,
            snapshot, firehose_snapshot_finish);

    dispatch_async_f(server_config.fs_snapshot_gate_queue, snapshot,
            firehose_snapshot_gate);
}

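/*
 * Usage sketch (illustrative): a snapshot handler observes MEM_START, then
 * IO_START, then per-client *_BUFFER events, and finally COMPLETE once every
 * participating client has been drained:
 *
 *     firehose_snapshot(^(firehose_client_t fc,
 *             firehose_snapshot_event_t event, firehose_chunk_t page) {
 *         if (event == FIREHOSE_SNAPSHOT_EVENT_COMPLETE) {
 *             // all participating clients captured
 *         }
 *     });
 */
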
#pragma mark MiG handler routines

kern_return_t
firehose_server_register(mach_port_t server_port OS_UNUSED,
        mach_port_t mem_port, mach_vm_size_t mem_size,
        mach_port_t comm_recvp, mach_port_t comm_sendp,
        mach_port_t extra_info_port, mach_vm_size_t extra_info_size,
        audit_token_t atoken)
{
    mach_vm_address_t base_addr = 0;
    firehose_client_t fc = NULL;
    kern_return_t kr;
    struct firehose_client_connected_info_s fcci = {
        .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION,
    };

    if (mem_size != sizeof(union firehose_buffer_u)) {
        return KERN_INVALID_VALUE;
    }

    /*
     * Request a MACH_NOTIFY_NO_SENDERS notification for recvp. That should
     * indicate the client going away.
     */
    mach_port_t previous = MACH_PORT_NULL;
    kr = mach_port_request_notification(mach_task_self(), comm_recvp,
            MACH_NOTIFY_NO_SENDERS, 0, comm_recvp,
            MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    DISPATCH_VERIFY_MIG(kr);
    if (dispatch_assume_zero(kr)) {
        return KERN_FAILURE;
    }
    dispatch_assert(previous == MACH_PORT_NULL);

    /* Map the memory handle into the server address space */
    kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0,
            VM_FLAGS_ANYWHERE, mem_port, 0, FALSE,
            VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
    DISPATCH_VERIFY_MIG(kr);
    if (dispatch_assume_zero(kr)) {
        return KERN_NO_SPACE;
    }

    if (extra_info_port && extra_info_size) {
        mach_vm_address_t addr = 0;
        kr = mach_vm_map(mach_task_self(), &addr, extra_info_size, 0,
                VM_FLAGS_ANYWHERE, extra_info_port, 0, FALSE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
        if (dispatch_assume_zero(kr)) {
            mach_vm_deallocate(mach_task_self(), base_addr, mem_size);
            return KERN_NO_SPACE;
        }
        fcci.fcci_data = (void *)(uintptr_t)addr;
        fcci.fcci_size = (size_t)extra_info_size;
    }

    fc = firehose_client_create((firehose_buffer_t)base_addr,
            (firehose_token_t)&atoken, comm_recvp, comm_sendp);
    dispatch_async(server_config.fs_io_drain_queue, ^{
        firehose_client_resume(fc, &fcci);
        if (fcci.fcci_size) {
            vm_deallocate(mach_task_self(), (vm_address_t)fcci.fcci_data,
                    fcci.fcci_size);
        }
    });

    if (extra_info_port) firehose_mach_port_send_release(extra_info_port);
    firehose_mach_port_send_release(mem_port);
    return KERN_SUCCESS;
}

kern_return_t
firehose_server_push_async(mach_port_t server_port OS_UNUSED,
        qos_class_t qos, boolean_t for_io, boolean_t expects_notifs)
{
    firehose_client_t fc = cur_client_info;
    pthread_priority_t pp = _pthread_qos_class_encode(qos, 0,
            _PTHREAD_PRIORITY_ENFORCE_FLAG);

    _dispatch_debug("FIREHOSE_PUSH_ASYNC (unique_pid %llx)",
            firehose_client_get_unique_pid(fc, NULL));
    if (!slowpath(fc->fc_memory_corrupted)) {
        if (expects_notifs && !fc->fc_use_notifs) {
            fc->fc_use_notifs = true;
        }
        firehose_client_push_async_merge(fc, pp, for_io);
    }
    return KERN_SUCCESS;
}

kern_return_t
firehose_server_push(mach_port_t server_port OS_UNUSED,
        mach_port_t reply_port, qos_class_t qos, boolean_t for_io,
        firehose_push_reply_t *push_reply OS_UNUSED)
{
    firehose_client_t fc = cur_client_info;
    dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS;
    dispatch_block_t block;
    dispatch_queue_t q;

    _dispatch_debug("FIREHOSE_PUSH (unique_pid %llx)",
            firehose_client_get_unique_pid(fc, NULL));

    if (slowpath(fc->fc_memory_corrupted)) {
        firehose_client_mark_corrupted(fc, reply_port);
        return MIG_NO_REPLY;
    }

    if (for_io) {
        q = server_config.fs_io_drain_queue;
    } else {
        q = server_config.fs_mem_drain_queue;
    }

    block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{
        firehose_client_drain(fc, reply_port,
                for_io ? FIREHOSE_DRAIN_FOR_IO : 0);
    });
    dispatch_async(q, block);
    _Block_release(block);
    return MIG_NO_REPLY;
}

static void
firehose_server_demux(firehose_client_t fc, mach_msg_header_t *msg_hdr)
{
    const size_t reply_size =
            sizeof(union __ReplyUnion__firehose_server_firehose_subsystem);

    cur_client_info = fc;
    firehose_mig_server(firehose_server, reply_size, msg_hdr);
}