/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>		/* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>
72 * Functions in iokit:IOUserClient.cpp
75 extern void iokit_add_reference( io_object_t obj
);
77 extern ipc_port_t
iokit_port_for_object( io_object_t obj
,
78 ipc_kobject_type_t type
);
80 extern kern_return_t
iokit_client_died( io_object_t obj
,
81 ipc_port_t port
, ipc_kobject_type_t type
, mach_port_mscount_t
* mscount
);
84 iokit_client_memory_for_type(
88 vm_address_t
* address
,
92 extern ppnum_t
IOGetLastPageNumber(void);
95 * Functions imported by iokit:IOUserClient.cpp
98 extern ipc_port_t
iokit_alloc_object_port( io_object_t obj
,
99 ipc_kobject_type_t type
);
101 extern kern_return_t
iokit_destroy_object_port( ipc_port_t port
);
103 extern mach_port_name_t
iokit_make_send_right( task_t task
,
104 io_object_t obj
, ipc_kobject_type_t type
);
106 extern kern_return_t
iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
);
108 extern io_object_t
iokit_lookup_connect_ref(io_object_t clientRef
, ipc_space_t task
);
110 extern io_object_t
iokit_lookup_connect_ref_current_task(io_object_t clientRef
);
112 extern void iokit_retain_port( ipc_port_t port
);
113 extern void iokit_release_port( ipc_port_t port
);
114 extern void iokit_release_port_send( ipc_port_t port
);
116 extern void iokit_lock_port(ipc_port_t port
);
117 extern void iokit_unlock_port(ipc_port_t port
);
119 extern kern_return_t
iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
);
122 * Functions imported by iokit:IOMemoryDescriptor.cpp
125 extern kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
126 mach_vm_size_t length
, unsigned int mapFlags
);
128 extern kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
);
130 extern kern_return_t
IOProtectCacheMode(vm_map_t map
, mach_vm_address_t va
,
131 mach_vm_size_t length
, unsigned int options
);
133 extern unsigned int IODefaultCacheBits(addr64_t pa
);
136 * Lookup a device by its port.
137 * Doesn't consume the naked send right; produces a device reference.
139 MIGEXTERN io_object_t
140 iokit_lookup_object_port(
143 register io_object_t obj
;
148 iokit_lock_port(port
);
149 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_OBJECT
)) {
150 obj
= (io_object_t
) port
->ip_kobject
;
151 iokit_add_reference( obj
);
156 iokit_unlock_port(port
);
161 MIGEXTERN io_object_t
162 iokit_lookup_connect_port(
165 register io_object_t obj
;
170 iokit_lock_port(port
);
171 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_CONNECT
)) {
172 obj
= (io_object_t
) port
->ip_kobject
;
173 iokit_add_reference( obj
);
178 iokit_unlock_port(port
);
184 iokit_lookup_connect_ref(io_object_t connectRef
, ipc_space_t space
)
186 io_object_t obj
= NULL
;
188 if (connectRef
&& MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef
))) {
192 kr
= ipc_object_translate(space
, CAST_MACH_PORT_TO_NAME(connectRef
), MACH_PORT_RIGHT_SEND
, (ipc_object_t
*)&port
);
194 if (kr
== KERN_SUCCESS
) {
195 assert(IP_VALID(port
));
200 iokit_lock_port(port
);
201 if (ip_active(port
) && (ip_kotype(port
) == IKOT_IOKIT_CONNECT
)) {
202 obj
= (io_object_t
) port
->ip_kobject
;
203 iokit_add_reference(obj
);
205 iokit_unlock_port(port
);
215 iokit_lookup_connect_ref_current_task(io_object_t connectRef
)
217 return iokit_lookup_connect_ref(connectRef
, current_space());
221 iokit_retain_port( ipc_port_t port
)
223 ipc_port_reference( port
);
227 iokit_release_port( ipc_port_t port
)
229 ipc_port_release( port
);
233 iokit_release_port_send( ipc_port_t port
)
235 ipc_port_release_send( port
);
238 extern lck_mtx_t iokit_obj_to_port_binding_lock
;
241 iokit_lock_port( __unused ipc_port_t port
)
243 lck_mtx_lock(&iokit_obj_to_port_binding_lock
);
247 iokit_unlock_port( __unused ipc_port_t port
)
249 lck_mtx_unlock(&iokit_obj_to_port_binding_lock
);
253 * Get the port for a device.
254 * Consumes a device reference; produces a naked send right.
257 iokit_make_object_port(
260 register ipc_port_t port
;
261 register ipc_port_t sendPort
;
266 port
= iokit_port_for_object( obj
, IKOT_IOKIT_OBJECT
);
268 sendPort
= ipc_port_make_send( port
);
269 iokit_release_port( port
);
273 iokit_remove_reference( obj
);
279 iokit_make_connect_port(
282 register ipc_port_t port
;
283 register ipc_port_t sendPort
;
288 port
= iokit_port_for_object( obj
, IKOT_IOKIT_CONNECT
);
290 sendPort
= ipc_port_make_send( port
);
291 iokit_release_port( port
);
295 iokit_remove_reference( obj
);
303 iokit_alloc_object_port( io_object_t obj
, ipc_kobject_type_t type
)
310 /* Allocate port, keeping a reference for it. */
311 port
= ipc_port_alloc_kernel();
315 /* set kobject & type */
316 // iokit_add_reference( obj );
317 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
319 /* Request no-senders notifications on the port. */
321 notify
= ipc_port_make_sonce_locked( port
);
322 ipc_port_nsrequest( port
, 1, notify
, ¬ify
);
324 assert( notify
== IP_NULL
);
334 iokit_destroy_object_port( ipc_port_t port
)
336 ipc_kobject_set( port
, IKO_NULL
, IKOT_NONE
);
338 // iokit_remove_reference( obj );
340 ipc_port_dealloc_kernel( port
);
343 return( KERN_SUCCESS
);
347 iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
)
349 iokit_lock_port(port
);
350 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
351 iokit_unlock_port(port
);
353 return( KERN_SUCCESS
);
356 EXTERN mach_port_name_t
357 iokit_make_send_right( task_t task
, io_object_t obj
, ipc_kobject_type_t type
)
361 mach_port_name_t name
;
364 return MACH_PORT_NULL
;
366 port
= iokit_port_for_object( obj
, type
);
368 sendPort
= ipc_port_make_send( port
);
369 iokit_release_port( port
);
373 if (IP_VALID( sendPort
)) {
375 kr
= ipc_object_copyout( task
->itk_space
, (ipc_object_t
) sendPort
,
376 MACH_MSG_TYPE_PORT_SEND
, TRUE
, &name
);
377 if ( kr
!= KERN_SUCCESS
)
378 name
= MACH_PORT_NULL
;
379 } else if ( sendPort
== IP_NULL
)
380 name
= MACH_PORT_NULL
;
381 else if ( sendPort
== IP_DEAD
)
382 name
= MACH_PORT_DEAD
;
384 iokit_remove_reference( obj
);
390 iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
)
392 return (mach_port_mod_refs( task
->itk_space
, name
, MACH_PORT_RIGHT_SEND
, delta
));
396 * Handle the No-More_Senders notification generated from a device port destroy.
397 * Since there are no longer any tasks which hold a send right to this device
398 * port a NMS notification has been generated.
402 iokit_no_senders( mach_no_senders_notification_t
* notification
)
405 io_object_t obj
= NULL
;
406 ipc_kobject_type_t type
= IKOT_NONE
;
409 port
= (ipc_port_t
) notification
->not_header
.msgh_remote_port
;
411 // convert a port to io_object_t.
412 if( IP_VALID(port
)) {
413 iokit_lock_port(port
);
414 if( ip_active(port
)) {
415 obj
= (io_object_t
) port
->ip_kobject
;
416 type
= ip_kotype( port
);
417 if( (IKOT_IOKIT_OBJECT
== type
)
418 || (IKOT_IOKIT_CONNECT
== type
))
419 iokit_add_reference( obj
);
423 iokit_unlock_port(port
);
427 mach_port_mscount_t mscount
= notification
->not_count
;
429 if( KERN_SUCCESS
!= iokit_client_died( obj
, port
, type
, &mscount
))
431 /* Re-request no-senders notifications on the port (if still active) */
433 if (ip_active(port
)) {
434 notify
= ipc_port_make_sonce_locked(port
);
435 ipc_port_nsrequest( port
, mscount
+ 1, notify
, ¬ify
);
437 if ( notify
!= IP_NULL
)
438 ipc_port_release_sonce(notify
);
441 iokit_remove_reference( obj
);
449 iokit_notify( mach_msg_header_t
* msg
)
451 switch (msg
->msgh_id
) {
452 case MACH_NOTIFY_NO_SENDERS
:
453 iokit_no_senders((mach_no_senders_notification_t
*) msg
);
456 case MACH_NOTIFY_PORT_DELETED
:
457 case MACH_NOTIFY_PORT_DESTROYED
:
458 case MACH_NOTIFY_SEND_ONCE
:
459 case MACH_NOTIFY_DEAD_NAME
:
461 printf("iokit_notify: strange notification %d\n", msg
->msgh_id
);
466 /* need to create a pmap function to generalize */
467 unsigned int IODefaultCacheBits(addr64_t pa
)
469 return(pmap_cache_attributes((ppnum_t
)(pa
>> PAGE_SHIFT
)));
472 kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
473 mach_vm_size_t length
, unsigned int options
)
478 pmap_t pmap
= map
->pmap
;
480 prot
= (options
& kIOMapReadOnly
)
481 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
483 pagenum
= (ppnum_t
)atop_64(pa
);
485 switch(options
& kIOMapCacheMask
) { /* What cache mode do we need? */
487 case kIOMapDefaultCache
:
489 flags
= IODefaultCacheBits(pa
);
492 case kIOMapInhibitCache
:
496 case kIOMapWriteThruCache
:
497 flags
= VM_WIMG_WTHRU
;
500 case kIOMapWriteCombineCache
:
501 flags
= VM_WIMG_WCOMB
;
504 case kIOMapCopybackCache
:
505 flags
= VM_WIMG_COPYBACK
;
507 case kIOMapCopybackInnerCache
:
508 flags
= VM_WIMG_INNERWBACK
;
512 pmap_set_cache_attributes(pagenum
, flags
);
514 vm_map_set_cache_attr(map
, (vm_map_offset_t
)va
);
517 // Set up a block mapped area
518 pmap_map_block(pmap
, va
, pagenum
, (uint32_t) atop_64(round_page_64(length
)), prot
, 0, 0);
520 return( KERN_SUCCESS
);
523 kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
)
525 pmap_t pmap
= map
->pmap
;
527 pmap_remove(pmap
, trunc_page_64(va
), round_page_64(va
+ length
));
529 return( KERN_SUCCESS
);
532 kern_return_t
IOProtectCacheMode(vm_map_t __unused map
, mach_vm_address_t __unused va
,
533 mach_vm_size_t __unused length
, unsigned int __unused options
)
538 pmap_t pmap
= map
->pmap
;
540 prot
= (options
& kIOMapReadOnly
)
541 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
543 switch (options
& kIOMapCacheMask
)
545 // what cache mode do we need?
546 case kIOMapDefaultCache
:
548 return (KERN_INVALID_ARGUMENT
);
550 case kIOMapInhibitCache
:
554 case kIOMapWriteThruCache
:
555 flags
= VM_WIMG_WTHRU
;
558 case kIOMapWriteCombineCache
:
559 flags
= VM_WIMG_WCOMB
;
562 case kIOMapCopybackCache
:
563 flags
= VM_WIMG_COPYBACK
;
567 // enter each page's physical address in the target map
568 for (off
= 0; off
< length
; off
+= page_size
)
570 ppnum_t ppnum
= pmap_find_phys(pmap
, va
+ off
);
572 pmap_enter(pmap
, va
+ off
, ppnum
, prot
, VM_PROT_NONE
, flags
, TRUE
);
575 return (KERN_SUCCESS
);
578 ppnum_t
IOGetLastPageNumber(void)
580 #if __i386__ || __x86_64__
581 ppnum_t lastPage
, highest
= 0;
584 for (idx
= 0; idx
< pmap_memory_region_count
; idx
++)
586 lastPage
= pmap_memory_regions
[idx
].end
- 1;
587 if (lastPage
> highest
)
597 void IOGetTime( mach_timespec_t
* clock_time
);
598 void IOGetTime( mach_timespec_t
* clock_time
)
602 clock_get_system_nanotime(&sec
, &nsec
);
603 clock_time
->tv_sec
= (typeof(clock_time
->tv_sec
))sec
;
604 clock_time
->tv_nsec
= nsec
;