/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MKLINUX-1.0DR2
 */

/* 1 April 1997 Simon Douglas:
 * Stolen wholesale from MkLinux.
 * Added nonblocking adb poll from interrupt level for the debugger.
 * Acknowledge before response so polled mode can work from inside the adb handler.
 *
 * 18 June 1998 sdouglas
 * Start IOKit version. Fix errors from kCudaSRQAssertMask. Use ool cmd & reply buffers,
 * not fixed len in packet. Does queueing here.
 *
 * 20 Nov 1998 suurballe
 * Port to C++
 */
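
// Overview: Cuda handles ADB autopolling, the real-time clock, power and
// restart requests, and an IIC bus (see the PE_* hooks and published
// resources installed in start()).  The driver exchanges bytes with it one at
// a time through a VIA shift register, handshaking with the TIP and ByteAck
// output lines and the TREQ input, under the small state machine driven from
// cuda_interrupt() below.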


#include "AppleCuda.h"
#include "IOCudaADBController.h"
#include <IOKit/IOLib.h>
#include <IOKit/IOSyncer.h>
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOInterruptEventSource.h>
#include <IOKit/IODeviceMemory.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/IOPM.h>

#include <IOKit/assert.h>

#define super IOService
OSDefineMetaClassAndStructors(AppleCuda,IOService)

static void cuda_interrupt ( AppleCuda * self );

static void cuda_process_response(AppleCuda * self);
static void cuda_transmit_data(AppleCuda * self);
static void cuda_expected_attention(AppleCuda * self);
static void cuda_unexpected_attention(AppleCuda * self);
static void cuda_receive_data(AppleCuda * self);
static void cuda_receive_last_byte(AppleCuda * self);
static void cuda_collision(AppleCuda * self);
static void cuda_idle(AppleCuda * self);

static void cuda_poll(AppleCuda * self);
static void cuda_error(AppleCuda * self);
static void cuda_send_request(AppleCuda * self);
static IOReturn cuda_do_sync_request( AppleCuda * self,
                        cuda_request_t * request, bool polled);
static void cuda_do_state_transition_delay(AppleCuda * self);

static int Cuda_PE_poll_input(unsigned int options, char * c);
static int Cuda_PE_read_write_time_of_day(unsigned int options, long * secs);
static int Cuda_PE_halt_restart(unsigned int type);
static int Cuda_PE_write_IIC(unsigned char addr, unsigned char reg,
                        unsigned char data);

static void
autopollArrived ( OSObject *inCuda, IOInterruptEventSource *, int );

static int set_cuda_power_message ( int command );
static int set_cuda_file_server_mode ( int command );
static void cuda_async_set_power_message_enable( thread_call_param_t param, thread_call_param_t );
static void cuda_async_set_file_server_mode( thread_call_param_t param, thread_call_param_t );

bool CudahasRoot( OSObject * us, void *, IOService * yourDevice );


//
// inline functions
//

static __inline__ unsigned char cuda_read_data(AppleCuda * self)
{
    volatile unsigned char val;

    val = *self->cuda_via_regs.shift; eieio();
    return val;
}

static __inline__ int cuda_get_result(cuda_request_t *request)
{
    int status = ADB_RET_OK;
    int theStatus = request->a_reply.a_header[1];

    if ( theStatus & kCudaTimeOutMask ) {
        status = ADB_RET_TIMEOUT;
#if 0
    // these are expected before autopoll mask is set
    } else if ( theStatus & kCudaSRQAssertMask ) {
        status = ADB_RET_UNEXPECTED_RESULT;
#endif
    } else if ( theStatus & kCudaSRQErrorMask ) {
        status = ADB_RET_REQUEST_ERROR;
    } else if ( theStatus & kCudaBusErrorMask ) {
        status = ADB_RET_BUS_ERROR;
    }

    return status;
}

static __inline__ void cuda_lock(AppleCuda * self)
{
    if( !self->cuda_polled_mode)
        IOSimpleLockLock(self->cuda_request_lock);
}

static __inline__ void cuda_unlock(AppleCuda * self)
{
    if( !self->cuda_polled_mode)
        IOSimpleLockUnlock(self->cuda_request_lock);
}
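
// Note on locking: in polled mode these helpers deliberately skip the
// spinlock, because cuda_do_sync_request() already holds cuda_request_lock
// (with interrupts disabled) while it runs a polled transaction by hand via
// cuda_poll(); taking the lock again here would deadlock.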

//
//
//


static AppleCuda * gCuda;
// **********************************************************************************
// init
//
// **********************************************************************************
bool AppleCuda::init ( OSDictionary * properties = 0 )
{
    return super::init(properties);
}


// **********************************************************************************
// start
//
// **********************************************************************************
bool AppleCuda::start ( IOService * nub )
{
    int i;
    IOMemoryMap * viaMap;
    unsigned char * cuda_base;

    if( !super::start(nub))
        return false;

    gCuda = this;
    // callPlatformFunction symbols
    cuda_check_any_interrupt = OSSymbol::withCString("cuda_check_any_interrupt");

    workLoop = NULL;
    eventSrc = NULL;
    ourADBinterface = NULL;
    _rootDomain = 0;

    workLoop = IOWorkLoop::workLoop();
    if ( !workLoop ) {
        kprintf("Start is bailing\n");
        return false;
    }

    eventSrc = IOInterruptEventSource::interruptEventSource(this, autopollArrived);
    if (!eventSrc ||
        kIOReturnSuccess != workLoop->addEventSource(eventSrc) ) {
        kprintf("Start is bailing\n");
        return false;
    }

    if( 0 == (viaMap = nub->mapDeviceMemoryWithIndex( 0 )) ) {
        IOLog("%s: no via memory\n", getName());
        kprintf("Start is bailing\n");
        return false;
    }
    cuda_base = (unsigned char *)viaMap->getVirtualAddress();

    kprintf("VIA base = %08x\n", (UInt32)cuda_base);
    ourADBinterface = new IOCudaADBController;
    if ( !ourADBinterface ) {
        kprintf("Start is bailing\n");
        return false;
    }
    if ( !ourADBinterface->init(0,this) ) {
        kprintf("Start is bailing\n");
        return false;
    }

    if ( !ourADBinterface->attach( this) ) {
        kprintf("Start is bailing\n");
        return false;
    }

    cuda_request_lock = IOSimpleLockAlloc();
    IOSimpleLockInit(cuda_request_lock);

    cuda_via_regs.dataB             = cuda_base;
    cuda_via_regs.handshakeDataA    = cuda_base+0x0200;
    cuda_via_regs.dataDirectionB    = cuda_base+0x0400;
    cuda_via_regs.dataDirectionA    = cuda_base+0x0600;
    cuda_via_regs.timer1CounterLow  = cuda_base+0x0800;
    cuda_via_regs.timer1CounterHigh = cuda_base+0x0A00;
    cuda_via_regs.timer1LatchLow    = cuda_base+0x0C00;
    cuda_via_regs.timer1LatchHigh   = cuda_base+0x0E00;
    cuda_via_regs.timer2CounterLow  = cuda_base+0x1000;
    cuda_via_regs.timer2CounterHigh = cuda_base+0x1200;
    cuda_via_regs.shift             = cuda_base+0x1400;
    cuda_via_regs.auxillaryControl  = cuda_base+0x1600;
    cuda_via_regs.peripheralControl = cuda_base+0x1800;
    cuda_via_regs.interruptFlag     = cuda_base+0x1A00;
    cuda_via_regs.interruptEnable   = cuda_base+0x1C00;
    cuda_via_regs.dataA             = cuda_base+0x1E00;
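
    // The sixteen VIA registers are spaced 0x200 bytes apart in the mapped
    // device memory, so the whole register file spans 0x2000 bytes starting
    // at cuda_base (see the offsets assigned above).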

    // we require delays of this duration between certain state transitions
    clock_interval_to_absolutetime_interval(200, 1, &cuda_state_transition_delay);

    // Set the direction of the cuda signals.  ByteAck and TIP are outputs and
    // TREQ is an input.

    *cuda_via_regs.dataDirectionB |= (kCudaByteAcknowledgeMask | kCudaTransferInProgressMask);
    *cuda_via_regs.dataDirectionB &= ~kCudaTransferRequestMask;

    // Set the clock control.  Set to shift data in by external clock CB1.

    *cuda_via_regs.auxillaryControl = (*cuda_via_regs.auxillaryControl | kCudaTransferMode) &
                                        kCudaSystemRecieve;

    // Clear any possible cuda interrupt.

    if ( *cuda_via_regs.shift );

    // Initialize the internal data.

    cuda_interrupt_state    = CUDA_STATE_IDLE;
    cuda_transaction_state  = CUDA_TS_NO_REQUEST;
    cuda_is_header_transfer = false;
    cuda_is_packet_type     = false;
    cuda_transfer_count     = 0;
    cuda_current_response   = NULL;
    for( i = 0; i < NUM_AP_BUFFERS; i++ ) {
        cuda_unsolicited[ i ].a_buffer = cuda_autopoll_buffers[ i ];
    }
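
    // NUM_AP_BUFFERS must be a power of two: the autopoll ring buffer indices
    // (inIndex/outIndex) wrap with "& (NUM_AP_BUFFERS - 1)" rather than a
    // modulo.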

    // Terminate transaction and set idle state

    cuda_neg_tip_and_byteack(this);

    // we want to delay 4 mS for ADB reset to complete

    IOSleep( 4 );

    // Clear pending interrupt if any...

    (void)cuda_read_data(this);

    // Issue a Sync Transaction, ByteAck asserted while TIP is negated.

    cuda_assert_byte_ack(this);

    // Wait for the Sync acknowledgement, cuda to assert TREQ

    cuda_wait_for_transfer_request_assert(this);

    // Wait for the Sync acknowledgement interrupt.

    cuda_wait_for_interrupt(this);

    // Clear pending interrupt

    (void)cuda_read_data(this);

    // Terminate the sync cycle by negating ByteAck

    cuda_neg_byte_ack(this);

    // Wait for the Sync termination acknowledgement, cuda negates TREQ.

    cuda_wait_for_transfer_request_neg(this);

    // Wait for the Sync termination acknowledgement interrupt.

    cuda_wait_for_interrupt(this);

    // Terminate transaction and set idle state, TIP negate and ByteAck negate.
    cuda_neg_transfer_in_progress(this);

    // Clear pending interrupt, if there is one...
    (void)cuda_read_data(this);

#if 0
    cuda_polled_mode = true;
#else
#define VIA_DEV_CUDA 2
    nub->registerInterrupt(VIA_DEV_CUDA,
                this, (IOInterruptAction) cuda_interrupt);
    nub->enableInterrupt(VIA_DEV_CUDA);
#endif

    PE_poll_input = Cuda_PE_poll_input;
    PE_read_write_time_of_day = Cuda_PE_read_write_time_of_day;
    PE_halt_restart = Cuda_PE_halt_restart;
    PE_write_IIC = Cuda_PE_write_IIC;
    publishResource( "IOiic0", this );
    publishResource( "IORTC", this );


    //set_cuda_power_message(kADB_powermsg_enable);   //won't work on beige G3
    thread_call_func(cuda_async_set_power_message_enable, (thread_call_param_t)this, true);
    thread_call_func(cuda_async_set_file_server_mode, (thread_call_param_t)this, true);

    registerService();  //Gossamer needs to find this driver for waking up G3

    _cuda_power_state = 1;  //default is wake state
    //We want to know when sleep is about to occur
    addNotification( gIOPublishNotification,serviceMatching("IOPMrootDomain"),
                    (IOServiceNotificationHandler)CudahasRoot, this, 0 );

    ourADBinterface->start( this );

    return true;
}

/* Here are some power management functions so we can tell when the system is
   going to sleep. */
bool CudahasRoot( OSObject * us, void *, IOService * yourDevice )
{
    if (( yourDevice != NULL ) && ((AppleCuda *)us)->_rootDomain == 0)
    {
        ((AppleCuda *)us)->_rootDomain = (IOPMrootDomain *) yourDevice;
        ((IOPMrootDomain *)yourDevice)->registerInterestedDriver((IOService *) us);
    }
    return true;
}

IOReturn AppleCuda::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned long unused1,
                    IOService* unused2)
{
    //kprintf("will change to %x", theFlags);
    if ( ! (theFlags & IOPMPowerOn) )
    {
        _cuda_power_state = 0;  //0 means sleeping
    }
    return IOPMAckImplied;
}

IOReturn AppleCuda::powerStateDidChangeTo ( IOPMPowerFlags theFlags, unsigned long unused1,
                    IOService* unused2)
{
    //kprintf("did change to %x", theFlags);
    if (theFlags & IOPMPowerOn)
    {
        _cuda_power_state = 1;  //1 means awake
    }
    return IOPMAckImplied;
}



// *****************************************************************************
// getWorkLoop
//
// Return the cuda's workloop.
//
// *****************************************************************************
IOWorkLoop *AppleCuda::getWorkLoop() const
{
    return workLoop;
}

// *****************************************************************************
// free
//
// Release everything we may have allocated.
//
// *****************************************************************************
void AppleCuda::free ( void )
{
    if ( workLoop ) {
        workLoop->release();
    }
    if ( eventSrc ) {
        eventSrc->release();
    }
    if ( ourADBinterface ) {
        ourADBinterface->release();
    }
    if (_rootDomain)
    {
        _rootDomain->deRegisterInterestedDriver((IOService *) this);
        _rootDomain = 0;
    }
    super::free();
}


// **********************************************************************************
// registerForADBInterrupts
//
// Some driver is calling to say it is prepared to receive "unsolicited" adb
// interrupts (e.g. autopoll keyboard and trackpad data).  The parameters identify
// who to call when we get one.
// **********************************************************************************
void AppleCuda::registerForADBInterrupts ( ADB_callback_func handler, IOService * caller )
{
    autopoll_handler = handler;
    ADBid = caller;
}


// **********************************************************************************
// autopollArrived
//
// **********************************************************************************
static void autopollArrived ( OSObject * CudaDriver, IOInterruptEventSource *, int )
{
    ((AppleCuda *)CudaDriver)->serviceAutopolls();
}
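
// autopollArrived runs on the driver's workloop: cuda_process_response()
// signals the IOInterruptEventSource whenever an unsolicited packet has been
// queued, and the event source calls back here to drain the ring buffer via
// serviceAutopolls().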

#define RB_BOOT     1   /* Causes reboot, not halt.  Is in xnu/bsd/sys/reboot.h */
extern "C" {
void boot(int paniced, int howto, char * command);
}


static void cuda_async_set_power_message_enable( thread_call_param_t param, thread_call_param_t )
{
    //AppleCuda * me = (AppleCuda *) param;

    set_cuda_power_message(kADB_powermsg_enable);
}

static void cuda_async_set_file_server_mode( thread_call_param_t param, thread_call_param_t )
{
    set_cuda_file_server_mode(1);
}
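
// These two helpers exist so that the corresponding Cuda commands can be
// issued from a thread_call rather than directly in start() or in the
// autopoll path; the comments in start() and serviceAutopolls() note that
// issuing them synchronously can lock up ADB on Beige G3 machines.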

// **********************************************************************************
// serviceAutopolls
// We get here just before calling autopollHandler() in IOADBController.cpp
// **********************************************************************************
void AppleCuda::serviceAutopolls ( void )
{
    cuda_packet_t * response;

    while( inIndex != outIndex ) {

        response = &cuda_unsolicited[ outIndex ];

        //Check for power messages, which are handled differently from regular
        // autopoll data coming from mouse or keyboard.
        if (response->a_header[0] == ADB_PACKET_POWER)
        {
            unsigned char flag, cmd;

            flag = response->a_header[1];
            cmd = response->a_header[2];

            if ((flag == kADB_powermsg_flag_chassis)
                && (cmd == kADB_powermsg_cmd_chassis_off))
            {
                thread_call_func(cuda_async_set_power_message_enable,
                    (thread_call_param_t)this, true);

                if (_rootDomain)
                {
                    if (_cuda_power_state)
                    {
                        //Put system to sleep now
                        _rootDomain->receivePowerNotification (kIOPMSleepNow);
                    }
                    else  //If asleep, wake up the system
                    {
                        //Tickle activity timer in root domain.  This will not
                        // wake up a machine that is in demand-sleep, but it will
                        // wake up an inactive system that dozed
                        _rootDomain->activityTickle(0,0);
                    }
                }
            }
            else if ((flag == kADB_powermsg_flag_keyboardpwr)
                && (cmd == kADB_powermsg_cmd_keyboardoff))
            {
                //set_cuda_power_message(kADB_powermsg_continue);
                //This needs to be async so Beige G3 ADB won't lock up
                thread_call_func(cuda_async_set_power_message_enable,
                    (thread_call_param_t)this, true);
            }

        }

        if ( ADBid != NULL ) {
            (*autopoll_handler)(ADBid,response->a_header[2],response->a_bcount,response->a_buffer);
        }

        outIndex = (outIndex + 1) & (NUM_AP_BUFFERS - 1);

    }  //end of while loop

}


// **********************************************************************************
// doSyncRequest
//
// **********************************************************************************
IOReturn AppleCuda::doSyncRequest ( cuda_request_t * request )
{
    return(cuda_do_sync_request(this, request, false));
}


IOReturn AppleCuda::callPlatformFunction(const OSSymbol *functionName,
                    bool waitForFunction,
                    void *param1, void *param2,
                    void *param3, void *param4)
{
    if (functionName == cuda_check_any_interrupt)
    {
        bool *hasint;

        hasint = (bool *)param1;
        *hasint = false;

        if (inIndex != outIndex)
        {
            *hasint = true;
        }
        return kIOReturnSuccess;
    }

    return kIOReturnBadArgument;
}
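
// The "cuda_check_any_interrupt" platform function simply reports whether the
// autopoll ring buffer currently holds undelivered packets (inIndex !=
// outIndex), so a caller can ask whether Cuda has pending input without
// waiting for an interrupt.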


// **********************************************************************************
// cuda_do_sync_request
//
// **********************************************************************************
IOReturn cuda_do_sync_request ( AppleCuda * self, cuda_request_t * request, bool polled )
{
    bool                wasPolled = false;
    IOInterruptState    ints;

    if( !polled ) {
        request->sync = IOSyncer::create();
        request->needWake = true;
    }

    ints = IOSimpleLockLockDisableInterrupt(self->cuda_request_lock);

    if( polled ) {
        wasPolled = self->cuda_polled_mode;
        self->cuda_polled_mode = polled;
    }

    if( self->cuda_last_request )
        self->cuda_last_request->a_next = request;
    else
        self->cuda_request = request;

    self->cuda_last_request = request;

    if( self->cuda_interrupt_state == CUDA_STATE_IDLE )
        cuda_send_request(self);

    if( polled ) {
        cuda_poll(self);
        self->cuda_polled_mode = wasPolled;
        assert( 0 == self->cuda_request );
        assert( 0 == self->cuda_last_request );
    }

    IOSimpleLockUnlockEnableInterrupt(self->cuda_request_lock, ints);

    if( !polled)
        request->sync->wait();

    return cuda_get_result(request);
}
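
// Illustrative caller pattern (a sketch only, modeled on the real callers in
// this file such as set_cuda_file_server_mode() below): build a
// cuda_request_t, fill in the command header bytes, and hand it to
// cuda_do_sync_request().
//
//     cuda_request_t cmd;
//
//     adb_init_request(&cmd);                      // initialize the request
//     cmd.a_cmd.a_hcount    = 3;                   // three header bytes follow
//     cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;   // Cuda pseudo command packet
//     cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_FILE_SERVER_FLAG;
//     cmd.a_cmd.a_header[2] = 1;                   // enable
//     (void) cuda_do_sync_request(gCuda, &cmd, true);   // true = run polled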


// **********************************************************************************
// Cuda_PE_read_write_time_of_day
//
// **********************************************************************************
static int Cuda_PE_read_write_time_of_day ( unsigned int options, long * secs )
{
    cuda_request_t cmd;

    adb_init_request(&cmd);

    cmd.a_cmd.a_hcount = 2;
    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;

    switch( options ) {

        case kPEReadTOD:
            cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_REAL_TIME;
            cmd.a_reply.a_buffer = (UInt8 *)secs;
            cmd.a_reply.a_bcount = sizeof(*secs);
            break;

        case kPEWriteTOD:
            cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_SET_REAL_TIME;
            cmd.a_cmd.a_buffer = (UInt8 *)secs;
            cmd.a_cmd.a_bcount = sizeof(*secs);
            break;

        default:
            return 1;
    }

    return cuda_do_sync_request(gCuda, &cmd, true);
}


// **********************************************************************************
// Cuda_PE_halt_restart
//
// **********************************************************************************
static int Cuda_PE_halt_restart ( unsigned int type )
{
    cuda_request_t cmd;

    adb_init_request(&cmd);

    cmd.a_cmd.a_hcount = 2;
    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;

    switch( type ) {

        case kPERestartCPU:
            cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_RESTART_SYSTEM;
            break;

        case kPEHaltCPU:
            cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_POWER_DOWN;
            break;

        default:
            return 1;
    }

    return cuda_do_sync_request(gCuda, &cmd, true);
}


// **********************************************************************************
// In case this machine loses power, it will automatically reboot when power is
// restored.  Only desktop machines have Cuda, so this feature will not affect
// PowerBooks.
// **********************************************************************************
static int set_cuda_file_server_mode ( int command )
{
    cuda_request_t cmd;

    adb_init_request(&cmd);

    cmd.a_cmd.a_hcount = 3;
    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;
    cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_FILE_SERVER_FLAG;
    cmd.a_cmd.a_header[2] = command;

    return cuda_do_sync_request(gCuda, &cmd, true);
}

// **********************************************************************************
// Fix the front panel power key (mostly on Yosemites) so that one press won't
// power down the entire machine
//
// **********************************************************************************
static int set_cuda_power_message ( int command )
{
    cuda_request_t cmd;

    if (command >= kADB_powermsg_invalid)
        return 0;   //invalid Cuda power request

    adb_init_request(&cmd);

    cmd.a_cmd.a_hcount = 3;
    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;
    cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_SET_POWER_MESSAGES;
    cmd.a_cmd.a_header[2] = command;

    return cuda_do_sync_request(gCuda, &cmd, true);
}


// **********************************************************************************
// Cuda_PE_write_IIC
//
// **********************************************************************************
static int Cuda_PE_write_IIC ( unsigned char addr, unsigned char reg, unsigned char data )
{
    cuda_request_t cmd;

    adb_init_request(&cmd);

    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;
    cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_SET_IIC;
    cmd.a_cmd.a_header[2] = addr;
    cmd.a_cmd.a_header[3] = reg;
    cmd.a_cmd.a_header[4] = data;
    cmd.a_cmd.a_hcount = 5;

    return cuda_do_sync_request(gCuda, &cmd, true);
}

IOReturn
AppleCudaWriteIIC( UInt8 address, const UInt8 * buffer, IOByteCount * count )
{
    IOReturn ret;
    cuda_request_t cmd;

    if( !gCuda)
        return( kIOReturnUnsupported );

    adb_init_request(&cmd);

    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;
    cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_SET_IIC;
    cmd.a_cmd.a_header[2] = address;
    cmd.a_cmd.a_hcount = 3;
    cmd.a_cmd.a_buffer = (UInt8 *) buffer;
    cmd.a_cmd.a_bcount = *count;

    ret = cuda_do_sync_request(gCuda, &cmd, true);

    *count = cmd.a_cmd.a_bcount;

    return( ret );
}

IOReturn
AppleCudaReadIIC( UInt8 address, UInt8 * buffer, IOByteCount * count )
{
    IOReturn ret;
    cuda_request_t cmd;

    if( !gCuda)
        return( kIOReturnUnsupported );

    adb_init_request(&cmd);

    cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO;
    cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_SET_IIC;
    cmd.a_cmd.a_header[2] = address;
    cmd.a_cmd.a_hcount = 3;
    cmd.a_reply.a_buffer = buffer;
    cmd.a_reply.a_bcount = *count;

    ret = cuda_do_sync_request(gCuda, &cmd, true);
    *count = cmd.a_reply.a_bcount;

    return( ret );
}


// **********************************************************************************
// Cuda_PE_poll_input
//
// **********************************************************************************
static int Cuda_PE_poll_input ( unsigned int, char * c )
{
    AppleCuda * self = gCuda;
    int interruptflag;
    UInt8 code;
    cuda_packet_t * response;           //0123456789abcdef
    static char keycodes2ascii[] =      "asdfhgzxcv_bqwer"   //00
                                        "yt123465=97-80]o"   //10
                                        "u[ip\nlj'k;_,/nm."  //20
                                        "\t_";               //30

    *c = 0xff;

    if( !self ) {
        return 1;
    }

    self->cuda_polled_mode = true;
    interruptflag = *self->cuda_via_regs.interruptFlag & kCudaInterruptMask;
    eieio();
    if( interruptflag ) {
        cuda_interrupt(self);
    }

    if( self->inIndex != self->outIndex ) {
        response = &self->cuda_unsolicited[ self->outIndex ];
        if( ((response->a_header[2] >> 4) == 2)
            && (response->a_bcount > 1) ) {
            code = response->a_buffer[0];
            if( code < sizeof(keycodes2ascii) ) {
                *c = keycodes2ascii[ code ];
            }
        }
        self->outIndex = self->inIndex;
    }

    self->cuda_polled_mode = false;
    return 0;
}
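
// Cuda_PE_poll_input is the nonblocking, polled input path used by the kernel
// debugger (see the 1997 note at the top of this file): it pumps
// cuda_interrupt() by hand when the VIA flags an interrupt, then translates
// the first byte of an autopolled keyboard report into ASCII with the table
// above.  The "(a_header[2] >> 4) == 2" test accepts packets from ADB address
// 2, the standard keyboard address; '_' entries mark keys with no mapping
// here.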


//
// internal
//


// **********************************************************************************
// cuda_send_request
//
// **********************************************************************************
static void cuda_send_request ( AppleCuda * self )
{

    // The data register must be written with the data byte within 25uS
    // of examining TREQ or we run the risk of getting out of sync
    // with Cuda.  So call with interrupts disabled and the spinlock held.

    // Check if we can commence with the packet transmission.  First, check if
    // Cuda can service our request now.  Second, check if Cuda wants to send
    // a response packet now.

    if( !cuda_is_transfer_in_progress(self) ) {
        // Set the shift register direction to output to Cuda by setting
        // the direction bit.

        cuda_set_data_direction_to_output(self);

        // Write the first byte to the shift register
        cuda_write_data(self, self->cuda_request->a_cmd.a_header[0]);

        // Set up the transfer state info here.

        self->cuda_is_header_transfer = true;
        self->cuda_transfer_count = 1;

        // Make sure we're in idle state before transaction, and then
        // assert TIP to tell Cuda we're starting the command
        cuda_neg_byte_ack(self);
        cuda_assert_transfer_in_progress(self);

        // The next state is going to be a transmit state, if there is
        // no collision.  This is a requested response but call it sync.

        self->cuda_interrupt_state = CUDA_STATE_TRANSMIT_EXPECTED;
        self->cuda_transaction_state = CUDA_TS_SYNC_RESPONSE;
    }

#if 0
    else {
        IOLog("Req = %x, state = %x, TIP = %x\n", self->cuda_request,
            self->cuda_interrupt_state, cuda_is_transfer_in_progress(self));
    }
#endif
}


// **********************************************************************************
// cuda_poll
//
// **********************************************************************************
static void cuda_poll( AppleCuda * self )
{
    do {
        cuda_wait_for_interrupt(self);
        cuda_interrupt(self);
    } while( self->cuda_interrupt_state != CUDA_STATE_IDLE );
}
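
// cuda_poll() runs the interrupt state machine by hand: it waits in
// cuda_wait_for_interrupt() for the VIA to flag an interrupt, dispatches
// cuda_interrupt(), and repeats until the transaction has returned to
// CUDA_STATE_IDLE.  This is how polled synchronous requests complete without
// taking hardware interrupts.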

//
// cuda_process_response
// Executes at secondary interrupt level.
//


// **********************************************************************************
// cuda_process_response
//
// **********************************************************************************
static void cuda_process_response ( AppleCuda * self )
{
    volatile cuda_request_t * request;
    unsigned int newIndex;

    // Almost ready for the next state, which should be an Idle state.
    // Just need to notify the client.

    if ( self->cuda_transaction_state == CUDA_TS_SYNC_RESPONSE ) {

        // dequeue request
        cuda_lock(self);
        request = self->cuda_request;
        if( NULL == (self->cuda_request = request->a_next) ) {
            self->cuda_last_request = NULL;
        }
        cuda_unlock(self);

        // wake the sync request thread
        if ( ((cuda_request_t *)request)->needWake ) {
            ((cuda_request_t *)request)->sync->signal();
        }

    }
    else {
        if ( self->cuda_transaction_state == CUDA_TS_ASYNC_RESPONSE ) {
            newIndex = (self->inIndex + 1) & (NUM_AP_BUFFERS - 1);
            if( newIndex != self->outIndex ) {
                self->inIndex = newIndex;
            }
            else {
                // drop this packet, and reuse the buffer
            }
            if ( !self->cuda_polled_mode ) {
                // wake thread to service autopolls
                self->eventSrc->interruptOccurred(0, 0, 0);
            }
        }
    }
    return;
}


// **********************************************************************************
// cuda_interrupt
//
// **********************************************************************************
static void cuda_interrupt ( AppleCuda * self )
{
    unsigned char interruptState;

    // Get the relevant signals for determining the cause of the interrupt:
    // the shift register direction, transfer in progress, and the transfer
    // request line.

    interruptState = cuda_get_interrupt_state(self);

    //kprintf("%02x",interruptState);

    switch ( interruptState ) {
        case kCudaReceiveByte:
            cuda_receive_data(self);
            break;

        case kCudaReceiveLastByte:
            cuda_receive_last_byte(self);
            break;

        case kCudaTransmitByte:
            cuda_transmit_data(self);
            break;

        case kCudaUnexpectedAttention:
            cuda_unexpected_attention(self);
            break;

        case kCudaExpectedAttention:
            cuda_expected_attention(self);
            break;

        case kCudaIdleState:
            cuda_idle(self);
            break;

        case kCudaCollision:
            cuda_collision(self);
            break;

        // Unknown interrupt, clear it and leave.
        default:
            cuda_error(self);
            break;
    }
}


//
// TransmitCudaData
// Executes at hardware interrupt level.
//
// **********************************************************************************
// cuda_transmit_data
//
// **********************************************************************************
static void cuda_transmit_data ( AppleCuda * self )
{
    // Clear the pending interrupt by reading the shift register.

    if ( self->cuda_is_header_transfer ) {
        // There are more header bytes, write one out.
        cuda_write_data(self, self->cuda_request->a_cmd.a_header[self->cuda_transfer_count++]);

        // Toggle the handshake line.
        if ( self->cuda_transfer_count >= self->cuda_request->a_cmd.a_hcount ) {
            self->cuda_is_header_transfer = FALSE;
            self->cuda_transfer_count = 0;
        }

        cuda_toggle_byte_ack( self);
    }
    else {
        if ( self->cuda_transfer_count < self->cuda_request->a_cmd.a_bcount ) {
            // There are more command bytes, write one out and update the pointer
            cuda_write_data( self,
                *(self->cuda_request->a_cmd.a_buffer + self->cuda_transfer_count++));
            // Toggle the handshake line.
            cuda_toggle_byte_ack(self);
        }
        else {
            (void)cuda_read_data(self);
            // There are no more command bytes, terminate the send transaction.
            // Cuda should send an expected attention interrupt soon.

            cuda_neg_tip_and_byteack(self);

            // The next interrupt should be an expected attention interrupt.

            self->cuda_interrupt_state = CUDA_STATE_ATTN_EXPECTED;
        }
    }
}

//
// cuda_expected_attention
// Executes at hardware interrupt level.
//


// **********************************************************************************
// cuda_expected_attention
//
// **********************************************************************************
static void cuda_expected_attention ( AppleCuda * self )
{
    // Clear the pending interrupt by reading the shift register.

    (void)cuda_read_data(self);

    // Allow the VIA to settle directions... otherwise there is the possibility
    // of data corruption.
    cuda_do_state_transition_delay(self);

    if ( self->cuda_transaction_state == CUDA_TS_SYNC_RESPONSE ) {
        self->cuda_current_response = (cuda_packet_t*)&self->cuda_request->a_reply;
    }
    else {
        self->cuda_current_response = &self->cuda_unsolicited[ self->inIndex ];
        self->cuda_current_response->a_hcount = 0;
        self->cuda_current_response->a_bcount = MAX_AP_RESPONSE;
    }

    self->cuda_is_header_transfer = true;
    self->cuda_is_packet_type = true;
    self->cuda_transfer_count = 0;

    // Set the shift register direction to input.
    cuda_set_data_direction_to_input(self);

    // Start the response packet transaction.
    cuda_assert_transfer_in_progress(self);

    // The next interrupt should be a receive data interrupt.
    self->cuda_interrupt_state = CUDA_STATE_RECEIVE_EXPECTED;
}


//
// cuda_unexpected_attention
// Executes at hardware interrupt level.
//

// **********************************************************************************
// cuda_unexpected_attention
//
// **********************************************************************************
static void cuda_unexpected_attention ( AppleCuda * self )
{
    // Clear the pending interrupt by reading the shift register.
    (void)cuda_read_data(self);

    // Get ready for an unsolicited response.
    self->cuda_current_response = &self->cuda_unsolicited[ self->inIndex ];
    self->cuda_current_response->a_hcount = 0;
    self->cuda_current_response->a_bcount = MAX_AP_RESPONSE;

    self->cuda_is_header_transfer = TRUE;
    self->cuda_is_packet_type = TRUE;
    self->cuda_transfer_count = 0;

    // Start the response packet transaction, Transaction In Progress
    cuda_assert_transfer_in_progress(self);

    // The next interrupt should be a receive data interrupt and the next
    // response should be an async response.

    self->cuda_interrupt_state = CUDA_STATE_RECEIVE_EXPECTED;

    self->cuda_transaction_state = CUDA_TS_ASYNC_RESPONSE;
}

//
// cuda_receive_data
// Executes at hardware interrupt level.
//


// **********************************************************************************
// cuda_receive_data
//
// **********************************************************************************
static void cuda_receive_data ( AppleCuda * self )
{
    if ( self->cuda_is_packet_type ) {
        unsigned char packetType;

        packetType = cuda_read_data( self);
        self->cuda_current_response->a_header[self->cuda_transfer_count++] = packetType;

        if ( packetType == ADB_PACKET_ERROR) {
            self->cuda_current_response->a_hcount = 4;
        }
        else {
            self->cuda_current_response->a_hcount = 3;
        }

        self->cuda_is_packet_type = false;

        cuda_toggle_byte_ack(self);

    }
    else {


        if ( self->cuda_is_header_transfer ) {

            self->cuda_current_response->a_header[self->cuda_transfer_count++] =
                cuda_read_data(self);

            if (self->cuda_transfer_count >= self->cuda_current_response->a_hcount) {
                self->cuda_is_header_transfer = FALSE;
                self->cuda_transfer_count = 0;
            }

            cuda_toggle_byte_ack(self);
        }
        else {
            if ( self->cuda_transfer_count < self->cuda_current_response->a_bcount ) {
                // Still room for more bytes.  Get the byte and tell Cuda to continue.
                // Toggle the handshake line, ByteAck, to acknowledge receive.

                *(self->cuda_current_response->a_buffer + self->cuda_transfer_count++) =
                    cuda_read_data(self);
                cuda_toggle_byte_ack(self);

            }
            else {
                // Cuda is still sending data but the buffer is full.
                // Normally we should not get here.  The only exceptions are open ended
                // requests such as PRAM read...  In any event, time to exit.

                self->cuda_current_response->a_bcount = self->cuda_transfer_count;

                cuda_read_data(self);

                cuda_process_response(self);
                cuda_neg_tip_and_byteack(self);
            }
        }
    }
}


//
// cuda_receive_last_byte
// Executes at hardware interrupt level.
//


// **********************************************************************************
// cuda_receive_last_byte
//
// **********************************************************************************
static void cuda_receive_last_byte ( AppleCuda * self )
{

    if ( self->cuda_is_header_transfer ) {
        self->cuda_current_response->a_header[self->cuda_transfer_count++] =
            cuda_read_data(self);

        self->cuda_transfer_count = 0;
    }
    else {
        if ( self->cuda_transfer_count < self->cuda_current_response->a_bcount ) {
            *(self->cuda_current_response->a_buffer + self->cuda_transfer_count++) =
                cuda_read_data(self);
        }
        else {
            /* Overrun -- ignore data */
            (void) cuda_read_data(self);
        }
    }
    self->cuda_current_response->a_bcount = self->cuda_transfer_count;
    // acknowledge before response so polled mode can work
    // from inside the handler
    cuda_neg_tip_and_byteack(self);
    cuda_process_response(self);
}


//
// cuda_collision
// Executes at hardware interrupt level.
//


// **********************************************************************************
// cuda_collision
//
// **********************************************************************************
static void cuda_collision ( AppleCuda * self )
{
    // Clear the pending interrupt by reading the shift register.
    (void)cuda_read_data(self);

    // Negate TIP to abort the send.  Cuda should send a second attention
    // interrupt to acknowledge the abort cycle.
    cuda_neg_transfer_in_progress(self);

    // The next interrupt should be an expected attention and the next
    // response packet should be an async response.

    self->cuda_interrupt_state = CUDA_STATE_ATTN_EXPECTED;
    self->cuda_transaction_state = CUDA_TS_ASYNC_RESPONSE;

    /* queue the request */
    self->cuda_is_header_transfer = false;
    self->cuda_transfer_count = 0;
}
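
// After a collision the aborted request is not dequeued: it stays at the head
// of cuda_request, so once the unsolicited packet that won the bus has been
// received and the state machine returns to idle, cuda_idle() will call
// cuda_send_request() again and retransmit it.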


//
// cuda_idle
// Executes at hardware interrupt level.
//


// **********************************************************************************
// cuda_idle
//
// **********************************************************************************
static void cuda_idle ( AppleCuda * self )
{

    // Clear the pending interrupt by reading the shift register.
    (void)cuda_read_data(self);

    cuda_lock(self);
    // Set to the idle state.
    self->cuda_interrupt_state = CUDA_STATE_IDLE;
    // See if there are any pending requests.
    if( self->cuda_request ) {
        cuda_send_request(self);
    }
    cuda_unlock(self);
}


// **********************************************************************************
// cuda_error
//
// **********************************************************************************
static void cuda_error ( AppleCuda * self )
{
    //printf("{Error %d}", self->cuda_transaction_state);

    // Was looking at cuda_transaction_state - doesn't seem right

    switch ( self->cuda_interrupt_state ) {
        case CUDA_STATE_IDLE:
            cuda_neg_tip_and_byteack(self);
            break;

        case CUDA_STATE_TRANSMIT_EXPECTED:
            if ( self->cuda_is_header_transfer && self->cuda_transfer_count <= 1 ) {
                cuda_do_state_transition_delay(self);
                cuda_neg_transfer_in_progress(self);
                cuda_set_data_direction_to_input(self);
                panic ("CUDA - TODO FORCE COMMAND BACK UP!\n");
            }
            else {
                self->cuda_interrupt_state = CUDA_STATE_ATTN_EXPECTED;
                cuda_neg_tip_and_byteack(self);
            }
            break;

        case CUDA_STATE_ATTN_EXPECTED:
            cuda_assert_transfer_in_progress(self);

            cuda_do_state_transition_delay(self);
            cuda_set_data_direction_to_input(self);
            cuda_neg_transfer_in_progress(self);
            panic("CUDA - TODO CHECK FOR TRANSACTION TYPE AND ERROR");
            break;

        case CUDA_STATE_RECEIVE_EXPECTED:
            cuda_neg_tip_and_byteack(self);
            panic("Cuda - todo check for transaction type and error");
            break;

        default:
            cuda_set_data_direction_to_input(self);
            cuda_neg_tip_and_byteack(self);
            break;
    }
}

static void cuda_do_state_transition_delay( AppleCuda * self )
{
    AbsoluteTime deadline;

    clock_absolutetime_interval_to_deadline(
        self->cuda_state_transition_delay, &deadline);
    clock_delay_until(deadline);
}