/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>		/* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>
72 * Functions in iokit:IOUserClient.cpp
75 extern void iokit_add_reference( io_object_t obj
);
77 extern ipc_port_t
iokit_port_for_object( io_object_t obj
,
78 ipc_kobject_type_t type
);
80 extern kern_return_t
iokit_client_died( io_object_t obj
,
81 ipc_port_t port
, ipc_kobject_type_t type
, mach_port_mscount_t
* mscount
);
84 iokit_client_memory_for_type(
88 vm_address_t
* address
,
92 extern ppnum_t
IOGetLastPageNumber(void);
95 * Functions imported by iokit:IOUserClient.cpp
98 extern ipc_port_t
iokit_alloc_object_port( io_object_t obj
,
99 ipc_kobject_type_t type
);
101 extern kern_return_t
iokit_destroy_object_port( ipc_port_t port
);
103 extern mach_port_name_t
iokit_make_send_right( task_t task
,
104 io_object_t obj
, ipc_kobject_type_t type
);
106 extern kern_return_t
iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
);
108 extern io_object_t
iokit_lookup_connect_ref(io_object_t clientRef
, ipc_space_t task
);
110 extern io_object_t
iokit_lookup_connect_ref_current_task(io_object_t clientRef
);
112 extern void iokit_retain_port( ipc_port_t port
);
113 extern void iokit_release_port( ipc_port_t port
);
114 extern void iokit_release_port_send( ipc_port_t port
);
116 extern void iokit_lock_port(ipc_port_t port
);
117 extern void iokit_unlock_port(ipc_port_t port
);
119 extern kern_return_t
iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
);
122 * Functions imported by iokit:IOMemoryDescriptor.cpp
125 extern kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
126 mach_vm_size_t length
, unsigned int mapFlags
);
128 extern kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
);
130 extern kern_return_t
IOProtectCacheMode(vm_map_t map
, mach_vm_address_t va
,
131 mach_vm_size_t length
, unsigned int options
);
133 extern unsigned int IODefaultCacheBits(addr64_t pa
);
136 * Lookup a device by its port.
137 * Doesn't consume the naked send right; produces a device reference.
139 MIGEXTERN io_object_t
140 iokit_lookup_object_port(
143 register io_object_t obj
;
148 iokit_lock_port(port
);
149 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_OBJECT
)) {
150 obj
= (io_object_t
) port
->ip_kobject
;
151 iokit_add_reference( obj
);
156 iokit_unlock_port(port
);
161 MIGEXTERN io_object_t
162 iokit_lookup_connect_port(
165 register io_object_t obj
;
170 iokit_lock_port(port
);
171 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_CONNECT
)) {
172 obj
= (io_object_t
) port
->ip_kobject
;
173 iokit_add_reference( obj
);
178 iokit_unlock_port(port
);
184 iokit_lookup_connect_ref(io_object_t connectRef
, ipc_space_t space
)
186 io_object_t obj
= NULL
;
188 if (connectRef
&& MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef
))) {
192 kr
= ipc_object_translate(space
, CAST_MACH_PORT_TO_NAME(connectRef
), MACH_PORT_RIGHT_SEND
, (ipc_object_t
*)&port
);
194 if (kr
== KERN_SUCCESS
) {
195 assert(IP_VALID(port
));
200 iokit_lock_port(port
);
201 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_CONNECT
)) {
202 obj
= (io_object_t
) port
->ip_kobject
;
203 iokit_add_reference(obj
);
205 iokit_unlock_port(port
);
215 iokit_lookup_connect_ref_current_task(io_object_t connectRef
)
217 return iokit_lookup_connect_ref(connectRef
, current_space());
221 iokit_retain_port( ipc_port_t port
)
223 ipc_port_reference( port
);
227 iokit_release_port( ipc_port_t port
)
229 ipc_port_release( port
);
233 iokit_release_port_send( ipc_port_t port
)
235 ipc_port_release_send( port
);
238 extern lck_mtx_t iokit_obj_to_port_binding_lock
;
241 iokit_lock_port( __unused ipc_port_t port
)
243 lck_mtx_lock(&iokit_obj_to_port_binding_lock
);
247 iokit_unlock_port( __unused ipc_port_t port
)
249 lck_mtx_unlock(&iokit_obj_to_port_binding_lock
);
253 * Get the port for a device.
254 * Consumes a device reference; produces a naked send right.
257 iokit_make_object_port(
260 register ipc_port_t port
;
261 register ipc_port_t sendPort
;
266 port
= iokit_port_for_object( obj
, IKOT_IOKIT_OBJECT
);
268 sendPort
= ipc_port_make_send( port
);
269 iokit_release_port( port
);
273 iokit_remove_reference( obj
);
279 iokit_make_connect_port(
282 register ipc_port_t port
;
283 register ipc_port_t sendPort
;
288 port
= iokit_port_for_object( obj
, IKOT_IOKIT_CONNECT
);
290 sendPort
= ipc_port_make_send( port
);
291 iokit_release_port( port
);
295 iokit_remove_reference( obj
);
303 iokit_alloc_object_port( io_object_t obj
, ipc_kobject_type_t type
)
310 /* Allocate port, keeping a reference for it. */
311 port
= ipc_port_alloc_kernel();
315 /* set kobject & type */
316 // iokit_add_reference( obj );
317 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
319 /* Request no-senders notifications on the port. */
321 notify
= ipc_port_make_sonce_locked( port
);
322 ipc_port_nsrequest( port
, 1, notify
, ¬ify
);
324 assert( notify
== IP_NULL
);
334 iokit_destroy_object_port( ipc_port_t port
)
337 iokit_lock_port(port
);
338 ipc_kobject_set( port
, IKO_NULL
, IKOT_NONE
);
340 // iokit_remove_reference( obj );
341 iokit_unlock_port(port
);
342 ipc_port_dealloc_kernel( port
);
345 return( KERN_SUCCESS
);
349 iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
)
351 iokit_lock_port(port
);
352 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
353 iokit_unlock_port(port
);
355 return( KERN_SUCCESS
);
358 EXTERN mach_port_name_t
359 iokit_make_send_right( task_t task
, io_object_t obj
, ipc_kobject_type_t type
)
363 mach_port_name_t name
= 0;
366 return MACH_PORT_NULL
;
368 port
= iokit_port_for_object( obj
, type
);
370 sendPort
= ipc_port_make_send( port
);
371 iokit_release_port( port
);
375 if (IP_VALID( sendPort
)) {
377 kr
= ipc_object_copyout( task
->itk_space
, (ipc_object_t
) sendPort
,
378 MACH_MSG_TYPE_PORT_SEND
, TRUE
, &name
);
379 if ( kr
!= KERN_SUCCESS
) {
380 ipc_port_release_send( sendPort
);
381 name
= MACH_PORT_NULL
;
383 } else if ( sendPort
== IP_NULL
)
384 name
= MACH_PORT_NULL
;
385 else if ( sendPort
== IP_DEAD
)
386 name
= MACH_PORT_DEAD
;
388 iokit_remove_reference( obj
);
394 iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
)
396 return (mach_port_mod_refs( task
->itk_space
, name
, MACH_PORT_RIGHT_SEND
, delta
));
400 * Handle the No-More_Senders notification generated from a device port destroy.
401 * Since there are no longer any tasks which hold a send right to this device
402 * port a NMS notification has been generated.
406 iokit_no_senders( mach_no_senders_notification_t
* notification
)
409 io_object_t obj
= NULL
;
410 ipc_kobject_type_t type
= IKOT_NONE
;
413 port
= (ipc_port_t
) notification
->not_header
.msgh_remote_port
;
415 // convert a port to io_object_t.
416 if( IP_VALID(port
)) {
417 iokit_lock_port(port
);
418 if( ip_active(port
)) {
419 obj
= (io_object_t
) port
->ip_kobject
;
420 type
= ip_kotype( port
);
421 if( (IKOT_IOKIT_OBJECT
== type
)
422 || (IKOT_IOKIT_CONNECT
== type
))
423 iokit_add_reference( obj
);
427 iokit_unlock_port(port
);
431 mach_port_mscount_t mscount
= notification
->not_count
;
433 if( KERN_SUCCESS
!= iokit_client_died( obj
, port
, type
, &mscount
))
435 /* Re-request no-senders notifications on the port (if still active) */
437 if (ip_active(port
)) {
438 notify
= ipc_port_make_sonce_locked(port
);
439 ipc_port_nsrequest( port
, mscount
+ 1, notify
, ¬ify
);
441 if ( notify
!= IP_NULL
)
442 ipc_port_release_sonce(notify
);
447 iokit_remove_reference( obj
);
455 iokit_notify( mach_msg_header_t
* msg
)
457 switch (msg
->msgh_id
) {
458 case MACH_NOTIFY_NO_SENDERS
:
459 iokit_no_senders((mach_no_senders_notification_t
*) msg
);
462 case MACH_NOTIFY_PORT_DELETED
:
463 case MACH_NOTIFY_PORT_DESTROYED
:
464 case MACH_NOTIFY_SEND_ONCE
:
465 case MACH_NOTIFY_DEAD_NAME
:
467 printf("iokit_notify: strange notification %d\n", msg
->msgh_id
);
472 /* need to create a pmap function to generalize */
473 unsigned int IODefaultCacheBits(addr64_t pa
)
475 return(pmap_cache_attributes((ppnum_t
)(pa
>> PAGE_SHIFT
)));
478 kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
479 mach_vm_size_t length
, unsigned int options
)
484 pmap_t pmap
= map
->pmap
;
486 prot
= (options
& kIOMapReadOnly
)
487 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
489 pagenum
= (ppnum_t
)atop_64(pa
);
491 switch(options
& kIOMapCacheMask
) { /* What cache mode do we need? */
493 case kIOMapDefaultCache
:
495 flags
= IODefaultCacheBits(pa
);
498 case kIOMapInhibitCache
:
502 case kIOMapWriteThruCache
:
503 flags
= VM_WIMG_WTHRU
;
506 case kIOMapWriteCombineCache
:
507 flags
= VM_WIMG_WCOMB
;
510 case kIOMapCopybackCache
:
511 flags
= VM_WIMG_COPYBACK
;
513 case kIOMapCopybackInnerCache
:
514 flags
= VM_WIMG_INNERWBACK
;
518 pmap_set_cache_attributes(pagenum
, flags
);
520 vm_map_set_cache_attr(map
, (vm_map_offset_t
)va
);
523 // Set up a block mapped area
524 pmap_map_block(pmap
, va
, pagenum
, (uint32_t) atop_64(round_page_64(length
)), prot
, 0, 0);
526 return( KERN_SUCCESS
);
529 kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
)
531 pmap_t pmap
= map
->pmap
;
533 pmap_remove(pmap
, trunc_page_64(va
), round_page_64(va
+ length
));
535 return( KERN_SUCCESS
);
538 kern_return_t
IOProtectCacheMode(vm_map_t __unused map
, mach_vm_address_t __unused va
,
539 mach_vm_size_t __unused length
, unsigned int __unused options
)
544 pmap_t pmap
= map
->pmap
;
545 pmap_flush_context pmap_flush_context_storage
;
546 boolean_t delayed_pmap_flush
= FALSE
;
548 prot
= (options
& kIOMapReadOnly
)
549 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
551 switch (options
& kIOMapCacheMask
)
553 // what cache mode do we need?
554 case kIOMapDefaultCache
:
556 return (KERN_INVALID_ARGUMENT
);
558 case kIOMapInhibitCache
:
562 case kIOMapWriteThruCache
:
563 flags
= VM_WIMG_WTHRU
;
566 case kIOMapWriteCombineCache
:
567 flags
= VM_WIMG_WCOMB
;
570 case kIOMapCopybackCache
:
571 flags
= VM_WIMG_COPYBACK
;
575 pmap_flush_context_init(&pmap_flush_context_storage
);
576 delayed_pmap_flush
= FALSE
;
578 // enter each page's physical address in the target map
579 for (off
= 0; off
< length
; off
+= page_size
)
581 ppnum_t ppnum
= pmap_find_phys(pmap
, va
+ off
);
583 pmap_enter_options(pmap
, va
+ off
, ppnum
, prot
, VM_PROT_NONE
, flags
, TRUE
,
584 PMAP_OPTIONS_NOFLUSH
, (void *)&pmap_flush_context_storage
);
585 delayed_pmap_flush
= TRUE
;
588 if (delayed_pmap_flush
== TRUE
)
589 pmap_flush(&pmap_flush_context_storage
);
591 return (KERN_SUCCESS
);
/*
 * IOGetLastPageNumber — scan the x86 pmap memory region table and return
 * the highest physical page number present.
 * NOTE(review): this span is a garbled extraction — stray original line
 * numbers are fused into the code, statements are split across lines, and
 * the tail of the function (the assignment to `highest`, the return, any
 * #else branch, #endif and closing brace) was dropped.  Left byte-identical;
 * restore from the original source before building.
 */
594 ppnum_t
IOGetLastPageNumber(void)
/* x86-only: walks pmap_memory_regions[0 .. pmap_memory_region_count) */
596 #if __i386__ || __x86_64__
597 ppnum_t lastPage
, highest
= 0;
600 for (idx
= 0; idx
< pmap_memory_region_count
; idx
++)
/* last page of this region; original presumably updates `highest` when larger — TODO confirm */
602 lastPage
= pmap_memory_regions
[idx
].end
- 1;
603 if (lastPage
> highest
)
/*
 * IOGetTime — return the system uptime as a mach_timespec_t via
 * clock_get_system_nanotime().
 * NOTE(review): garbled extraction — the prototype and definition are split
 * across lines with stray original line numbers fused in; the local
 * declarations of `sec`/`nsec` (presumably clock_sec_t / clock_nsec_t —
 * TODO confirm) and the closing brace are not visible here.  Left
 * byte-identical; restore from the original source before building.
 */
613 void IOGetTime( mach_timespec_t
* clock_time
);
614 void IOGetTime( mach_timespec_t
* clock_time
)
/* fetch seconds + nanoseconds of system uptime */
618 clock_get_system_nanotime(&sec
, &nsec
);
619 clock_time
->tv_sec
= (typeof(clock_time
->tv_sec
))sec
;
620 clock_time
->tv_nsec
= nsec
;