filename | src/asic.c |
changeset | 1100:50e702af9373 |
prev | 1065:bc1cc0c54917 |
next | 1237:377077d10d62 |
author | nkeynes |
date | Mon Feb 15 17:27:14 2010 +1000 (13 years ago) |
permissions | -rw-r--r-- |
last change | Hook up the fake bios boot Use fakebios if invoked with -b, or if there's no boot rom loaded |
view | annotate | diff | log | raw |
1 /**
2 * $Id$
3 *
4 * Support for the miscellaneous ASIC functions (Primarily event multiplexing,
5 * and DMA).
6 *
7 * Copyright (c) 2005 Nathan Keynes.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
20 #define MODULE asic_module
22 #include <assert.h>
23 #include <stdlib.h>
24 #include "dream.h"
25 #include "mem.h"
26 #include "sh4/intc.h"
27 #include "sh4/dmac.h"
28 #include "sh4/sh4.h"
29 #include "dreamcast.h"
30 #include "maple/maple.h"
31 #include "gdrom/ide.h"
32 #include "pvr2/pvr2.h"
33 #include "asic.h"
34 #define MMIO_IMPL
35 #include "asic.h"
36 /*
37 * Open questions:
38 * 1) Does changing the mask after event occurance result in the
39 * interrupt being delivered immediately?
40 * TODO: Logic diagram of ASIC event/interrupt logic.
41 *
42 * ... don't even get me started on the "EXTDMA" page, about which, apparently,
43 * practically nothing is publicly known...
44 */
/* Forward declarations for the module lifecycle callbacks below. */
static void asic_check_cleared_events( void );
static void asic_init( void );
static void asic_reset( void );
static uint32_t asic_run_slice( uint32_t nanosecs );
static void asic_save_state( FILE *f );
static int asic_load_state( FILE *f );
static uint32_t g2_update_fifo_status( uint32_t slice_cycle );

/* Module descriptor registered with the dreamcast core (fields per
 * struct dreamcast_module: name, init, reset, start, run_slice, stop,
 * save_state, load_state — presumably; confirm against dreamcast.h). */
struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
        NULL, asic_save_state, asic_load_state };

/* Durations (in slice-cycle ticks/ns) that each G2STATUS FIFO bit stays
 * asserted after a G2 write — see the comment above asic_g2_write_word. */
#define G2_BIT5_TICKS 60
#define G2_BIT4_TICKS 160
#define G2_BIT0_ON_TICKS 120
#define G2_BIT0_OFF_TICKS 420

/* Countdown timers (relative to the current time slice) for turning the
 * G2STATUS FIFO bits on/off. A value of -1 means the timer is inactive. */
struct asic_g2_state {
    int bit5_off_timer;
    int bit4_on_timer;
    int bit4_off_timer;
    int bit0_on_timer;
    int bit0_off_timer;
};

static struct asic_g2_state g2_state;
72 static uint32_t asic_run_slice( uint32_t nanosecs )
73 {
74 g2_update_fifo_status(nanosecs);
75 if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
76 g2_state.bit5_off_timer = -1;
77 } else {
78 g2_state.bit5_off_timer -= nanosecs;
79 }
81 if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
82 g2_state.bit4_off_timer = -1;
83 } else {
84 g2_state.bit4_off_timer -= nanosecs;
85 }
86 if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
87 g2_state.bit4_on_timer = -1;
88 } else {
89 g2_state.bit4_on_timer -= nanosecs;
90 }
92 if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
93 g2_state.bit0_off_timer = -1;
94 } else {
95 g2_state.bit0_off_timer -= nanosecs;
96 }
97 if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
98 g2_state.bit0_on_timer = -1;
99 } else {
100 g2_state.bit0_on_timer -= nanosecs;
101 }
103 return nanosecs;
104 }
/**
 * Module init: register the ASIC and EXTDMA MMIO regions with the memory
 * system, then reset the G2 FIFO state.
 */
static void asic_init( void )
{
    register_io_region( &mmio_region_ASIC );
    register_io_region( &mmio_region_EXTDMA );
    asic_reset();
}
/**
 * Reset the G2 FIFO state: the 0xFF fill sets every int timer field to -1,
 * i.e. "timer inactive".
 */
static void asic_reset( void )
{
    memset( &g2_state, 0xFF, sizeof(g2_state) );
}
/**
 * Serialize the G2 FIFO timer state to the save file.
 * NOTE(review): fwrite's return value is ignored — a short write is silently
 * dropped; the void save_state interface gives no way to report it.
 */
static void asic_save_state( FILE *f )
{
    fwrite( &g2_state, sizeof(g2_state), 1, f );
}
123 static int asic_load_state( FILE *f )
124 {
125 if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
126 return 1;
127 else
128 return 0;
129 }
132 /**
133 * Setup the timers for the 3 FIFO status bits following a write through the G2
134 * bus from the SH4 side. The timing is roughly as follows: (times are
135 * approximate based on software readings - I wouldn't take this as gospel but
136 * it seems to be enough to fool most programs).
137 * 0ns: Bit 5 (Input fifo?) goes high immediately on the write
138 * 40ns: Bit 5 goes low and bit 4 goes high
139 * 120ns: Bit 4 goes low, bit 0 goes high
140 * 240ns: Bit 0 goes low.
141 *
142 * Additional writes while the FIFO is in operation extend the time that the
143 * bits remain high as one might expect, without altering the time at which
144 * they initially go high.
145 */
void asic_g2_write_word()
{
    /* Bit 5 goes high immediately; schedule (or extend) its turn-off time. */
    if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    } else {
        g2_state.bit5_off_timer += G2_BIT5_TICKS;
    }

    /* Bit 4 turns on when bit 5 turns off — hence G2_BIT5_TICKS here. The
     * on-time is only set if not already pending (it isn't extended). */
    if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    }

    /* Bit 4 off-time: anchored to its on-time on first write, extended on
     * subsequent writes while the FIFO is still busy. */
    if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
    } else {
        g2_state.bit4_off_timer += G2_BIT4_TICKS;
    }

    /* Bit 0 on-time: set once, not extended. */
    if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
    }

    /* Bit 0 off-time: anchored to its on-time on first write, extended on
     * subsequent writes. */
    if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
    } else {
        g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
    }

    /* Bit 5 (input FIFO busy?) is visible immediately. */
    MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
}
177 static uint32_t g2_update_fifo_status( uint32_t nanos )
178 {
179 uint32_t val = MMIO_READ( ASIC, G2STATUS );
180 if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
181 val = val & (~0x20);
182 g2_state.bit5_off_timer = -1;
183 }
184 if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
185 val = val | 0x10;
186 g2_state.bit4_on_timer = -1;
187 }
188 if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
189 val = val & (~0x10);
190 g2_state.bit4_off_timer = -1;
191 }
193 if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
194 val = val | 0x01;
195 g2_state.bit0_on_timer = -1;
196 }
197 if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
198 val = val & (~0x01);
199 g2_state.bit0_off_timer = -1;
200 }
202 MMIO_WRITE( ASIC, G2STATUS, val );
203 return val;
204 }
/**
 * Read handler for G2STATUS: bring the FIFO bits up to date with the
 * current SH4 cycle position before returning them.
 */
static int g2_read_status() {
    return g2_update_fifo_status( sh4r.slice_cycle );
}
211 void asic_event( int event )
212 {
213 int offset = ((event&0x60)>>3);
214 int result = (MMIO_READ(ASIC, PIRQ0 + offset)) |= (1<<(event&0x1F));
216 if( result & MMIO_READ(ASIC, IRQA0 + offset) )
217 intc_raise_interrupt( INT_IRQ13 );
218 if( result & MMIO_READ(ASIC, IRQB0 + offset) )
219 intc_raise_interrupt( INT_IRQ11 );
220 if( result & MMIO_READ(ASIC, IRQC0 + offset) )
221 intc_raise_interrupt( INT_IRQ9 );
223 if( event >= 64 ) { /* Third word */
224 asic_event( EVENT_CASCADE2 );
225 } else if( event >= 32 ) { /* Second word */
226 asic_event( EVENT_CASCADE1 );
227 }
228 }
/**
 * Clear a pending ASIC event bit from its PIRQ word. If that empties the
 * word, also drop the corresponding cascade bit in PIRQ0, then re-evaluate
 * the interrupt lines so any that no longer have pending enabled events
 * are lowered.
 */
void asic_clear_event( int event ) {
    int offset = ((event&0x60)>>3);
    uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset) & (~(1<<(event&0x1F)));
    MMIO_WRITE( ASIC, PIRQ0 + offset, result );
    if( result == 0 ) {
        /* clear cascades if necessary */
        if( event >= 64 ) {
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        } else if( event >= 32 ) {
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
        }
    }

    asic_check_cleared_events();
}
246 void asic_check_cleared_events( )
247 {
248 int i, setA = 0, setB = 0, setC = 0;
249 uint32_t bits;
250 for( i=0; i<12; i+=4 ) {
251 bits = MMIO_READ( ASIC, PIRQ0 + i );
252 setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
253 setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
254 setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
255 }
256 if( setA == 0 )
257 intc_clear_interrupt( INT_IRQ13 );
258 if( setB == 0 )
259 intc_clear_interrupt( INT_IRQ11 );
260 if( setC == 0 )
261 intc_clear_interrupt( INT_IRQ9 );
262 }
264 void asic_event_mask_changed( )
265 {
266 int i, setA = 0, setB = 0, setC = 0;
267 uint32_t bits;
268 for( i=0; i<12; i+=4 ) {
269 bits = MMIO_READ( ASIC, PIRQ0 + i );
270 setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
271 setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
272 setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
273 }
274 if( setA == 0 )
275 intc_clear_interrupt( INT_IRQ13 );
276 else
277 intc_raise_interrupt( INT_IRQ13 );
278 if( setB == 0 )
279 intc_clear_interrupt( INT_IRQ11 );
280 else
281 intc_raise_interrupt( INT_IRQ11 );
282 if( setC == 0 )
283 intc_clear_interrupt( INT_IRQ9 );
284 else
285 intc_raise_interrupt( INT_IRQ9 );
286 }
288 void g2_dma_transfer( int channel )
289 {
290 uint32_t offset = channel << 5;
292 if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
293 if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
294 uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
295 uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
296 uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
297 uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
298 // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
299 unsigned char buf[length];
300 if( dir == 0 ) { /* SH4 to device */
301 mem_copy_from_sh4( buf, sh4addr, length );
302 mem_copy_to_sh4( extaddr, buf, length );
303 } else { /* Device to SH4 */
304 mem_copy_from_sh4( buf, extaddr, length );
305 mem_copy_to_sh4( sh4addr, buf, length );
306 }
307 MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
308 asic_event( EVENT_G2_DMA0 + channel );
309 } else {
310 MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
311 }
312 }
313 }
315 void asic_ide_dma_transfer( )
316 {
317 if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
318 if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
319 MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );
321 uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
322 uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
323 // int dir = MMIO_READ( EXTDMA, IDEDMADIR );
325 uint32_t xfer = ide_read_data_dma( addr, length );
326 MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
327 MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
328 asic_event( EVENT_IDE_DMA );
329 } else { /* 0 */
330 MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
331 }
332 }
333 }
335 void pvr_dma_transfer( )
336 {
337 sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
338 uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
339 unsigned char *data = alloca( count );
340 uint32_t rcount = DMAC_get_buffer( 2, data, count );
341 if( rcount != count )
342 WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );
344 pvr2_dma_write( destaddr, data, rcount );
346 MMIO_WRITE( ASIC, PVRDMACTL, 0 );
347 MMIO_WRITE( ASIC, PVRDMACNT, 0 );
348 if( destaddr & 0x01000000 ) { /* Write to texture RAM */
349 MMIO_WRITE( ASIC, PVRDMADEST, destaddr + rcount );
350 }
351 asic_event( EVENT_PVR_DMA );
352 }
354 void pvr_dma2_transfer()
355 {
356 if( MMIO_READ( EXTDMA, PVRDMA2CTL2 ) == 1 ) {
357 if( MMIO_READ( EXTDMA, PVRDMA2CTL1 ) == 1 ) {
358 sh4addr_t extaddr = MMIO_READ( EXTDMA, PVRDMA2EXT );
359 sh4addr_t sh4addr = MMIO_READ( EXTDMA, PVRDMA2SH4 );
360 int dir = MMIO_READ( EXTDMA, PVRDMA2DIR );
361 uint32_t length = MMIO_READ( EXTDMA, PVRDMA2SIZ );
362 unsigned char buf[length];
363 if( dir == 0 ) { /* SH4 to PVR */
364 mem_copy_from_sh4( buf, sh4addr, length );
365 mem_copy_to_sh4( extaddr, buf, length );
366 } else { /* PVR to SH4 */
367 mem_copy_from_sh4( buf, extaddr, length );
368 mem_copy_to_sh4( sh4addr, buf, length );
369 }
370 MMIO_WRITE( EXTDMA, PVRDMA2CTL2, 0 );
371 asic_event( EVENT_PVR_DMA2 );
372 }
373 }
374 }
/**
 * Execute a SORT DMA transfer: walk a linked-list table in guest memory
 * and submit each referenced polygon block to the PVR2 tile accelerator.
 * Table entries are 32- or 16-bit offsets (per SORTDMATSIZ); special
 * values 1 ("end of list, restart") and 2 ("end of transfer") control the
 * walk. On completion the list count is written to SORTDMACNT and the
 * control register cleared.
 * NOTE(review): mem_get_region(table_addr) is dereferenced without a NULL
 * check, unlike the data pointer below — presumably the table address is
 * assumed valid; confirm.
 */
void sort_dma_transfer( )
{
    sh4addr_t table_addr = MMIO_READ( ASIC, SORTDMATBL );
    sh4addr_t data_addr = MMIO_READ( ASIC, SORTDMADATA );
    int table_size = MMIO_READ( ASIC, SORTDMATSIZ );   /* nonzero => 32-bit entries */
    int addr_shift = MMIO_READ( ASIC, SORTDMAASIZ ) ? 5 : 0;   /* entries in 32-byte units? */
    int count = 1;

    uint32_t *table32 = (uint32_t *)mem_get_region( table_addr );
    uint16_t *table16 = (uint16_t *)table32;
    uint32_t next = table_size ? (*table32++) : (uint32_t)(*table16++);
    while(1) {
        next &= 0x07FFFFFF;
        if( next == 1 ) {
            /* End of this list: advance to the next table entry */
            next = table_size ? (*table32++) : (uint32_t)(*table16++);
            count++;
            continue;
        } else if( next == 2 ) {
            /* End of transfer */
            asic_event( EVENT_SORT_DMA );
            break;
        }
        uint32_t *data = (uint32_t *)mem_get_region(data_addr + (next<<addr_shift));
        if( data == NULL ) {
            break;
        }

        uint32_t *poly = pvr2_ta_find_polygon_context(data, 128);
        if( poly == NULL ) {
            asic_event( EVENT_SORT_DMA_ERR );
            break;
        }
        /* Block size in 32-byte units comes from the polygon context;
         * 0 means the maximum (0x100 units = 8KB). */
        uint32_t size = poly[6] & 0xFF;
        if( size == 0 ) {
            size = 0x100;
        }
        /* The link to the next block is also embedded in the context */
        next = poly[7];
        pvr2_ta_write( (unsigned char *)data, size<<5 );
    }

    MMIO_WRITE( ASIC, SORTDMACNT, count );
    MMIO_WRITE( ASIC, SORTDMACTL, 0 );
}
419 gboolean asic_enable_ide_interface( gboolean enable )
420 {
421 gboolean oldval = idereg.interface_enabled;
422 idereg.interface_enabled = enable;
423 return oldval;
424 }
/**
 * ASIC MMIO read handler. Most registers (including the IRQ status/mask
 * set, which is listed explicitly but handled identically to the default)
 * are read straight from the backing store; G2STATUS is synthesized from
 * the FIFO timers at read time.
 */
MMIO_REGION_READ_FN( ASIC, reg )
{
    int32_t val;
    reg &= 0xFFF;
    switch( reg ) {
    case PIRQ0:
    case PIRQ1:
    case PIRQ2:
    case IRQA0:
    case IRQA1:
    case IRQA2:
    case IRQB0:
    case IRQB1:
    case IRQB2:
    case IRQC0:
    case IRQC1:
    case IRQC2:
    case MAPLE_STATE:
        val = MMIO_READ(ASIC, reg);
        return val;
    case G2STATUS:
        /* Computed on demand from the G2 FIFO timers */
        return g2_read_status();
    default:
        val = MMIO_READ(ASIC, reg);
        return val;
    }
}
455 MMIO_REGION_READ_DEFSUBFNS(ASIC)
/**
 * ASIC MMIO write handler. Event status words are write-one-to-clear,
 * mask writes re-evaluate the interrupt lines, and writes to the DMA
 * control registers can trigger transfers immediately.
 */
MMIO_REGION_WRITE_FN( ASIC, reg, val )
{
    reg &= 0xFFF;
    switch( reg ) {
    case PIRQ1:
        break; /* Treat this as read-only for the moment */
    case PIRQ0:
        /* Write-one-to-clear semantics */
        val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
        MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
        asic_check_cleared_events();
        break;
    case PIRQ2:
        /* Clear any events */
        val = MMIO_READ(ASIC, reg)&(~val);
        MMIO_WRITE( ASIC, reg, val );
        if( val == 0 ) { /* all clear - clear the cascade bit */
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        }
        asic_check_cleared_events();
        break;
    case IRQA0:
    case IRQA1:
    case IRQA2:
    case IRQB0:
    case IRQB1:
    case IRQB2:
    case IRQC0:
    case IRQC1:
    case IRQC2:
        /* Mask change may raise or lower the interrupt lines */
        MMIO_WRITE( ASIC, reg, val );
        asic_event_mask_changed();
        break;
    case SYSRESET:
        /* 0x7611 is the magic reset value */
        if( val == 0x7611 ) {
            dreamcast_reset();
        } else {
            WARN( "Unknown value %08X written to SYSRESET port", val );
        }
        break;
    case MAPLE_STATE:
        MMIO_WRITE( ASIC, reg, val );
        if( val & 1 ) {
            /* Bit 0 kicks off maple DMA processing immediately */
            uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA) &0x1FFFFFE0;
            maple_handle_buffer( maple_addr );
            MMIO_WRITE( ASIC, reg, 0 );
        }
        break;
    case PVRDMADEST:
        /* Force the destination into the PVR address space */
        MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
        break;
    case PVRDMACNT:
        MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
        break;
    case PVRDMACTL: /* Initiate PVR DMA transfer */
        val = val & 0x01;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            pvr_dma_transfer();
        }
        break;
    case SORTDMATBL: case SORTDMADATA:
        /* Force the addresses into system RAM space */
        MMIO_WRITE( ASIC, reg, (val & 0x0FFFFFE0) | 0x08000000 );
        break;
    case SORTDMATSIZ: case SORTDMAASIZ:
        MMIO_WRITE( ASIC, reg, (val & 1) );
        break;
    case SORTDMACTL:
        /* Writing 1 initiates the SORT DMA transfer immediately */
        val = val & 1;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            sort_dma_transfer();
        }
        break;
    case MAPLE_DMA:
        MMIO_WRITE( ASIC, reg, val );
        break;
    default:
        MMIO_WRITE( ASIC, reg, val );
    }
}
/**
 * EXTDMA MMIO read handler. IDE registers are routed to the IDE emulation
 * (and read as all-ones while the interface is disabled); everything else
 * comes from the backing store.
 */
MMIO_REGION_READ_FN( EXTDMA, reg )
{
    uint32_t val;
    reg &= 0xFFF;
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return 0xFFFFFFFF; /* disabled */
    }

    switch( reg ) {
    case IDEALTSTATUS:
        /* Status without the interrupt-acknowledge side effect of IDECMD */
        val = idereg.status;
        return val;
    case IDEDATA: return ide_read_data_pio( );
    case IDEFEAT: return idereg.error;
    case IDECOUNT:return idereg.count;
    case IDELBA0: return ide_get_drive_status();
    case IDELBA1: return idereg.lba1;
    case IDELBA2: return idereg.lba2;
    case IDEDEV: return idereg.device;
    case IDECMD:
        val = ide_read_status();
        return val;
    default:
        val = MMIO_READ( EXTDMA, reg );
        return val;
    }
}
565 MMIO_REGION_READ_DEFSUBFNS(EXTDMA)
/**
 * EXTDMA MMIO write handler. IDE register writes are routed to the IDE
 * emulation (ignored while the interface is disabled); DMA control-register
 * writes mask off reserved bits and may trigger the corresponding transfer
 * immediately.
 */
MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
{
    reg &= 0xFFF;
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return; /* disabled */
    }

    switch( reg ) {
    case IDEALTSTATUS: /* Device control */
        ide_write_control( val );
        break;
    case IDEDATA:
        ide_write_data_pio( val );
        break;
    /* IDE task-file registers are only writable when the device permits */
    case IDEFEAT:
        if( ide_can_write_regs() )
            idereg.feature = (uint8_t)val;
        break;
    case IDECOUNT:
        if( ide_can_write_regs() )
            idereg.count = (uint8_t)val;
        break;
    case IDELBA0:
        if( ide_can_write_regs() )
            idereg.lba0 = (uint8_t)val;
        break;
    case IDELBA1:
        if( ide_can_write_regs() )
            idereg.lba1 = (uint8_t)val;
        break;
    case IDELBA2:
        if( ide_can_write_regs() )
            idereg.lba2 = (uint8_t)val;
        break;
    case IDEDEV:
        if( ide_can_write_regs() )
            idereg.device = (uint8_t)val;
        break;
    case IDECMD:
        if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
            ide_write_command( (uint8_t)val );
        }
        break;
    case IDEDMASH4:
        MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 );
        break;
    case IDEDMASIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
        break;
    case IDEDMADIR:
        MMIO_WRITE( EXTDMA, reg, val & 1 );
        break;
    case IDEDMACTL1:
    case IDEDMACTL2:
        /* Writing either control register may start the IDE DMA transfer */
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        asic_ide_dma_transfer( );
        break;
    case IDEACTIVATE:
        /* Magic enable/disable values */
        if( val == 0x001FFFFF ) {
            idereg.interface_enabled = TRUE;
            /* Conventional wisdom says that this is necessary but not
             * sufficient to enable the IDE interface.
             */
        } else if( val == 0x000042FE ) {
            idereg.interface_enabled = FALSE;
        }
        break;
    case G2DMA0EXT: case G2DMA0SH4: case G2DMA0SIZ:
    case G2DMA1EXT: case G2DMA1SH4: case G2DMA1SIZ:
    case G2DMA2EXT: case G2DMA2SH4: case G2DMA2SIZ:
    case G2DMA3EXT: case G2DMA3SH4: case G2DMA3SIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x9FFFFFE0 );
        break;
    case G2DMA0MOD: case G2DMA1MOD: case G2DMA2MOD: case G2DMA3MOD:
        MMIO_WRITE( EXTDMA, reg, val & 0x07 );
        break;
    case G2DMA0DIR: case G2DMA1DIR: case G2DMA2DIR: case G2DMA3DIR:
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        break;
    /* Writing either control register may start the channel's transfer */
    case G2DMA0CTL1:
    case G2DMA0CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1);
        g2_dma_transfer( 0 );
        break;
    case G2DMA0STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA1CTL1:
    case G2DMA1CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1);
        g2_dma_transfer( 1 );
        break;

    case G2DMA1STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA2CTL1:
    case G2DMA2CTL2:
        MMIO_WRITE( EXTDMA, reg, val &1 );
        g2_dma_transfer( 2 );
        break;
    case G2DMA2STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA3CTL1:
    case G2DMA3CTL2:
        MMIO_WRITE( EXTDMA, reg, val &1 );
        g2_dma_transfer( 3 );
        break;
    case G2DMA3STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case PVRDMA2CTL1:
    case PVRDMA2CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1 );
        pvr_dma2_transfer();
        break;
    default:
        MMIO_WRITE( EXTDMA, reg, val );
    }
}
.