/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 #include <zone_debug.h>
29 #include <mach/boolean.h>
30 #include <mach/kern_return.h>
31 #include <mach/mig_errors.h>
32 #include <mach/port.h>
33 #include <mach/vm_param.h>
34 #include <mach/notify.h>
35 //#include <mach/mach_host_server.h>
36 #include <mach/mach_types.h>
38 #include <machine/machparam.h> /* spl definitions */
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
43 #include <kern/clock.h>
45 #include <kern/counters.h>
46 #include <kern/queue.h>
47 #include <kern/zalloc.h>
48 #include <kern/thread.h>
49 #include <kern/task.h>
50 #include <kern/sched_prim.h>
51 #include <kern/misc_protos.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
57 #include <device/device_types.h>
58 #include <device/device_port.h>
59 #include <device/device_server.h>
61 #include <machine/machparam.h>
63 #if defined(__i386__) || defined(__x86_64__)
64 #include <i386/pmap.h>
66 #if defined(__arm__) || defined(__arm64__)
69 #include <IOKit/IOTypes.h>
75 * Functions in iokit:IOUserClient.cpp
78 extern void iokit_add_reference( io_object_t obj
);
79 extern void iokit_add_connect_reference( io_object_t obj
);
81 extern ipc_port_t
iokit_port_for_object( io_object_t obj
,
82 ipc_kobject_type_t type
);
84 extern kern_return_t
iokit_client_died( io_object_t obj
,
85 ipc_port_t port
, ipc_kobject_type_t type
, mach_port_mscount_t
* mscount
);
88 iokit_client_memory_for_type(
92 vm_address_t
* address
,
96 extern ppnum_t
IOGetLastPageNumber(void);
99 * Functions imported by iokit:IOUserClient.cpp
102 extern ipc_port_t
iokit_alloc_object_port( io_object_t obj
,
103 ipc_kobject_type_t type
);
105 extern kern_return_t
iokit_destroy_object_port( ipc_port_t port
);
107 extern mach_port_name_t
iokit_make_send_right( task_t task
,
108 io_object_t obj
, ipc_kobject_type_t type
);
110 extern kern_return_t
iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
);
112 extern io_object_t
iokit_lookup_connect_ref(io_object_t clientRef
, ipc_space_t task
);
114 extern io_object_t
iokit_lookup_connect_ref_current_task(io_object_t clientRef
);
116 extern void iokit_retain_port( ipc_port_t port
);
117 extern void iokit_release_port( ipc_port_t port
);
118 extern void iokit_release_port_send( ipc_port_t port
);
120 extern void iokit_lock_port(ipc_port_t port
);
121 extern void iokit_unlock_port(ipc_port_t port
);
123 extern kern_return_t
iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
);
126 * Functions imported by iokit:IOMemoryDescriptor.cpp
129 extern kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
130 mach_vm_size_t length
, unsigned int mapFlags
);
132 extern kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
);
134 extern kern_return_t
IOProtectCacheMode(vm_map_t map
, mach_vm_address_t va
,
135 mach_vm_size_t length
, unsigned int options
);
137 extern unsigned int IODefaultCacheBits(addr64_t pa
);
140 * Lookup a device by its port.
141 * Doesn't consume the naked send right; produces a device reference.
143 MIGEXTERN io_object_t
144 iokit_lookup_object_port(
152 iokit_lock_port(port
);
153 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_OBJECT
)) {
154 obj
= (io_object_t
) port
->ip_kobject
;
155 iokit_add_reference( obj
);
160 iokit_unlock_port(port
);
165 MIGEXTERN io_object_t
166 iokit_lookup_connect_port(
174 iokit_lock_port(port
);
175 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_CONNECT
)) {
176 obj
= (io_object_t
) port
->ip_kobject
;
177 iokit_add_connect_reference( obj
);
182 iokit_unlock_port(port
);
188 iokit_lookup_connect_ref(io_object_t connectRef
, ipc_space_t space
)
190 io_object_t obj
= NULL
;
192 if (connectRef
&& MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef
))) {
196 kr
= ipc_object_translate(space
, CAST_MACH_PORT_TO_NAME(connectRef
), MACH_PORT_RIGHT_SEND
, (ipc_object_t
*)&port
);
198 if (kr
== KERN_SUCCESS
) {
199 assert(IP_VALID(port
));
204 iokit_lock_port(port
);
205 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_CONNECT
)) {
206 obj
= (io_object_t
) port
->ip_kobject
;
207 iokit_add_connect_reference(obj
);
209 iokit_unlock_port(port
);
219 iokit_lookup_connect_ref_current_task(io_object_t connectRef
)
221 return iokit_lookup_connect_ref(connectRef
, current_space());
225 iokit_retain_port( ipc_port_t port
)
227 ipc_port_reference( port
);
231 iokit_release_port( ipc_port_t port
)
233 ipc_port_release( port
);
237 iokit_release_port_send( ipc_port_t port
)
239 ipc_port_release_send( port
);
242 extern lck_mtx_t iokit_obj_to_port_binding_lock
;
245 iokit_lock_port( __unused ipc_port_t port
)
247 lck_mtx_lock(&iokit_obj_to_port_binding_lock
);
251 iokit_unlock_port( __unused ipc_port_t port
)
253 lck_mtx_unlock(&iokit_obj_to_port_binding_lock
);
257 * Get the port for a device.
258 * Consumes a device reference; produces a naked send right.
261 iokit_make_object_port(
270 port
= iokit_port_for_object( obj
, IKOT_IOKIT_OBJECT
);
272 sendPort
= ipc_port_make_send( port
);
273 iokit_release_port( port
);
277 iokit_remove_reference( obj
);
283 iokit_make_connect_port(
292 port
= iokit_port_for_object( obj
, IKOT_IOKIT_CONNECT
);
294 sendPort
= ipc_port_make_send( port
);
295 iokit_release_port( port
);
299 iokit_remove_reference( obj
);
307 iokit_alloc_object_port( io_object_t obj
, ipc_kobject_type_t type
)
314 /* Allocate port, keeping a reference for it. */
315 port
= ipc_port_alloc_kernel();
319 /* set kobject & type */
320 // iokit_add_reference( obj );
321 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
323 /* Request no-senders notifications on the port. */
325 notify
= ipc_port_make_sonce_locked( port
);
326 ipc_port_nsrequest( port
, 1, notify
, ¬ify
);
328 assert( notify
== IP_NULL
);
338 iokit_destroy_object_port( ipc_port_t port
)
341 iokit_lock_port(port
);
342 ipc_kobject_set( port
, IKO_NULL
, IKOT_NONE
);
344 // iokit_remove_reference( obj );
345 iokit_unlock_port(port
);
346 ipc_port_dealloc_kernel( port
);
349 return( KERN_SUCCESS
);
353 iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
)
355 iokit_lock_port(port
);
356 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
357 iokit_unlock_port(port
);
359 return( KERN_SUCCESS
);
362 EXTERN mach_port_name_t
363 iokit_make_send_right( task_t task
, io_object_t obj
, ipc_kobject_type_t type
)
367 mach_port_name_t name
= 0;
370 return MACH_PORT_NULL
;
372 port
= iokit_port_for_object( obj
, type
);
374 sendPort
= ipc_port_make_send( port
);
375 iokit_release_port( port
);
379 if (IP_VALID( sendPort
)) {
381 kr
= ipc_object_copyout( task
->itk_space
, (ipc_object_t
) sendPort
,
382 MACH_MSG_TYPE_PORT_SEND
, TRUE
, &name
);
383 if ( kr
!= KERN_SUCCESS
) {
384 ipc_port_release_send( sendPort
);
385 name
= MACH_PORT_NULL
;
387 } else if ( sendPort
== IP_NULL
)
388 name
= MACH_PORT_NULL
;
389 else if ( sendPort
== IP_DEAD
)
390 name
= MACH_PORT_DEAD
;
392 iokit_remove_reference( obj
);
398 iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
)
400 return (mach_port_mod_refs( task
->itk_space
, name
, MACH_PORT_RIGHT_SEND
, delta
));
404 * Handle the No-More_Senders notification generated from a device port destroy.
405 * Since there are no longer any tasks which hold a send right to this device
406 * port a NMS notification has been generated.
410 iokit_no_senders( mach_no_senders_notification_t
* notification
)
413 io_object_t obj
= NULL
;
414 ipc_kobject_type_t type
= IKOT_NONE
;
417 port
= (ipc_port_t
) notification
->not_header
.msgh_remote_port
;
419 // convert a port to io_object_t.
420 if( IP_VALID(port
)) {
421 iokit_lock_port(port
);
422 if( ip_active(port
)) {
423 obj
= (io_object_t
) port
->ip_kobject
;
424 type
= ip_kotype( port
);
425 if( (IKOT_IOKIT_OBJECT
== type
)
426 || (IKOT_IOKIT_CONNECT
== type
))
427 iokit_add_reference( obj
);
431 iokit_unlock_port(port
);
435 mach_port_mscount_t mscount
= notification
->not_count
;
437 if( KERN_SUCCESS
!= iokit_client_died( obj
, port
, type
, &mscount
))
439 /* Re-request no-senders notifications on the port (if still active) */
441 if (ip_active(port
)) {
442 notify
= ipc_port_make_sonce_locked(port
);
443 ipc_port_nsrequest( port
, mscount
+ 1, notify
, ¬ify
);
445 if ( notify
!= IP_NULL
)
446 ipc_port_release_sonce(notify
);
451 iokit_remove_reference( obj
);
459 iokit_notify( mach_msg_header_t
* msg
)
461 switch (msg
->msgh_id
) {
462 case MACH_NOTIFY_NO_SENDERS
:
463 iokit_no_senders((mach_no_senders_notification_t
*) msg
);
466 case MACH_NOTIFY_PORT_DELETED
:
467 case MACH_NOTIFY_PORT_DESTROYED
:
468 case MACH_NOTIFY_SEND_ONCE
:
469 case MACH_NOTIFY_DEAD_NAME
:
471 printf("iokit_notify: strange notification %d\n", msg
->msgh_id
);
476 /* need to create a pmap function to generalize */
477 unsigned int IODefaultCacheBits(addr64_t pa
)
479 return(pmap_cache_attributes((ppnum_t
)(pa
>> PAGE_SHIFT
)));
482 kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
483 mach_vm_size_t length
, unsigned int options
)
488 pmap_t pmap
= map
->pmap
;
490 prot
= (options
& kIOMapReadOnly
)
491 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
493 pagenum
= (ppnum_t
)atop_64(pa
);
495 switch(options
& kIOMapCacheMask
) { /* What cache mode do we need? */
497 case kIOMapDefaultCache
:
499 flags
= IODefaultCacheBits(pa
);
502 case kIOMapInhibitCache
:
506 case kIOMapWriteThruCache
:
507 flags
= VM_WIMG_WTHRU
;
510 case kIOMapWriteCombineCache
:
511 flags
= VM_WIMG_WCOMB
;
514 case kIOMapCopybackCache
:
515 flags
= VM_WIMG_COPYBACK
;
518 case kIOMapCopybackInnerCache
:
519 flags
= VM_WIMG_INNERWBACK
;
522 case kIOMapPostedWrite
:
523 flags
= VM_WIMG_POSTED
;
527 pmap_set_cache_attributes(pagenum
, flags
);
529 vm_map_set_cache_attr(map
, (vm_map_offset_t
)va
);
532 // Set up a block mapped area
533 return pmap_map_block(pmap
, va
, pagenum
, (uint32_t) atop_64(round_page_64(length
)), prot
, 0, 0);
536 kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
)
538 pmap_t pmap
= map
->pmap
;
540 pmap_remove(pmap
, trunc_page_64(va
), round_page_64(va
+ length
));
542 return( KERN_SUCCESS
);
545 kern_return_t
IOProtectCacheMode(vm_map_t __unused map
, mach_vm_address_t __unused va
,
546 mach_vm_size_t __unused length
, unsigned int __unused options
)
551 pmap_t pmap
= map
->pmap
;
552 pmap_flush_context pmap_flush_context_storage
;
553 boolean_t delayed_pmap_flush
= FALSE
;
555 prot
= (options
& kIOMapReadOnly
)
556 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
558 switch (options
& kIOMapCacheMask
)
560 // what cache mode do we need?
561 case kIOMapDefaultCache
:
563 return (KERN_INVALID_ARGUMENT
);
565 case kIOMapInhibitCache
:
569 case kIOMapWriteThruCache
:
570 flags
= VM_WIMG_WTHRU
;
573 case kIOMapWriteCombineCache
:
574 flags
= VM_WIMG_WCOMB
;
577 case kIOMapCopybackCache
:
578 flags
= VM_WIMG_COPYBACK
;
581 case kIOMapCopybackInnerCache
:
582 flags
= VM_WIMG_INNERWBACK
;
585 case kIOMapPostedWrite
:
586 flags
= VM_WIMG_POSTED
;
590 pmap_flush_context_init(&pmap_flush_context_storage
);
591 delayed_pmap_flush
= FALSE
;
593 // enter each page's physical address in the target map
594 for (off
= 0; off
< length
; off
+= page_size
)
596 ppnum_t ppnum
= pmap_find_phys(pmap
, va
+ off
);
598 pmap_enter_options(pmap
, va
+ off
, ppnum
, prot
, VM_PROT_NONE
, flags
, TRUE
,
599 PMAP_OPTIONS_NOFLUSH
, (void *)&pmap_flush_context_storage
);
600 delayed_pmap_flush
= TRUE
;
603 if (delayed_pmap_flush
== TRUE
)
604 pmap_flush(&pmap_flush_context_storage
);
606 return (KERN_SUCCESS
);
609 ppnum_t
IOGetLastPageNumber(void)
611 #if __i386__ || __x86_64__
612 ppnum_t lastPage
, highest
= 0;
615 for (idx
= 0; idx
< pmap_memory_region_count
; idx
++)
617 lastPage
= pmap_memory_regions
[idx
].end
- 1;
618 if (lastPage
> highest
)
622 #elif __arm__ || __arm64__
630 void IOGetTime( mach_timespec_t
* clock_time
);
631 void IOGetTime( mach_timespec_t
* clock_time
)
635 clock_get_system_nanotime(&sec
, &nsec
);
636 clock_time
->tv_sec
= (typeof(clock_time
->tv_sec
))sec
;
637 clock_time
->tv_nsec
= nsec
;