4 * Support for the miscellaneous ASIC functions (Primarily event multiplexing,
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #define MODULE asic_module
29 #include "dreamcast.h"
30 #include "maple/maple.h"
31 #include "gdrom/ide.h"
32 #include "pvr2/pvr2.h"
38 * 1) Does changing the mask after event occurrence result in the
39 * interrupt being delivered immediately?
40 * TODO: Logic diagram of ASIC event/interrupt logic.
42 * ... don't even get me started on the "EXTDMA" page, about which, apparently,
43 * practically nothing is publicly known...
/* Forward declarations for the ASIC module's internal entry points. */
46 static void asic_check_cleared_events( void );
47 static void asic_init( void );
48 static void asic_reset( void );
49 static uint32_t asic_run_slice( uint32_t nanosecs );
50 static void asic_save_state( FILE *f );
51 static int asic_load_state( FILE *f );
52 static uint32_t g2_update_fifo_status( uint32_t slice_cycle );
/* Module descriptor registered with the dreamcast core; the two NULL slots
 * are unused hooks for this module. */
54 struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
55 NULL, asic_save_state, asic_load_state };
/* Durations (in the same time unit as sh4r.slice_cycle — nanoseconds per the
 * timing table above asic_g2_write_word) that each G2STATUS FIFO bit stays
 * asserted after a G2 write. */
57 #define G2_BIT5_TICKS 60
58 #define G2_BIT4_TICKS 160
59 #define G2_BIT0_ON_TICKS 120
60 #define G2_BIT0_OFF_TICKS 420
/* Per-bit countdown timers for the G2 FIFO status emulation.  Fields include
 * at least bit5_off_timer, bit4_on/off_timer and bit0_on/off_timer (int32_t,
 * -1 == inactive, judging by their use below) — NOTE(review): the field list
 * is not visible in this view, confirm against the full source. */
62 struct asic_g2_state {
70 static struct asic_g2_state g2_state;
/* Advance the G2 FIFO status timers across one scheduler slice of length
 * `nanosecs`.  Each timer that would expire within the slice is marked
 * inactive (-1); otherwise it is decremented by the slice length so it
 * carries over into the next slice.  (Bit set/clear side effects are applied
 * by g2_update_fifo_status, called first.) */
72 static uint32_t asic_run_slice( uint32_t nanosecs )
74 g2_update_fifo_status(nanosecs);
75 if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
76 g2_state.bit5_off_timer = -1;
/* timer still pending — roll it forward into the next slice */
78 g2_state.bit5_off_timer -= nanosecs;
81 if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
82 g2_state.bit4_off_timer = -1;
84 g2_state.bit4_off_timer -= nanosecs;
86 if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
87 g2_state.bit4_on_timer = -1;
89 g2_state.bit4_on_timer -= nanosecs;
92 if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
93 g2_state.bit0_off_timer = -1;
95 g2_state.bit0_off_timer -= nanosecs;
97 if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
98 g2_state.bit0_on_timer = -1;
100 g2_state.bit0_on_timer -= nanosecs;
/* Module init: register the ASIC and EXTDMA MMIO regions with the core. */
106 static void asic_init( void )
108 register_io_region( &mmio_region_ASIC );
109 register_io_region( &mmio_region_EXTDMA );
/* Module reset: fill g2_state with 0xFF, which sets every int32_t timer
 * field to -1 (i.e. "inactive"). */
113 static void asic_reset( void )
115 memset( &g2_state, 0xFF, sizeof(g2_state) );
/* Serialize the raw G2 timer state to the save-state file.
 * NOTE(review): fwrite's return value is not checked — a short write would
 * silently corrupt the save state. */
118 static void asic_save_state( FILE *f )
120 fwrite( &g2_state, sizeof(g2_state), 1, f );
/* Restore the G2 timer state from a save-state file.  Returns nonzero on a
 * short read (error return path not visible in this view — confirm against
 * the full source). */
123 static int asic_load_state( FILE *f )
125 if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
133 * Setup the timers for the 3 FIFO status bits following a write through the G2
134 * bus from the SH4 side. The timing is roughly as follows: (times are
135 * approximate based on software readings - I wouldn't take this as gospel but
136 * it seems to be enough to fool most programs).
137 * 0ns: Bit 5 (Input fifo?) goes high immediately on the write
138 * 40ns: Bit 5 goes low and bit 4 goes high
139 * 120ns: Bit 4 goes low, bit 0 goes high
140 * 240ns: Bit 0 goes low.
142 * Additional writes while the FIFO is in operation extend the time that the
143 * bits remain high as one might expect, without altering the time at which
144 * they initially go high.
/* Model the G2STATUS FIFO bits for a single SH4-side write through the G2
 * bus (timing described in the comment block above).  For each bit: if its
 * timer is already past (inactive), start it fresh relative to the current
 * slice cycle; otherwise extend the existing deadline, matching the observed
 * behaviour that back-to-back writes stretch the high period without moving
 * the initial rising edge.  Finally bit 5 is raised immediately in G2STATUS. */
146 void asic_g2_write_word()
148 if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
149 g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
/* FIFO already active — extend the high period */
151 g2_state.bit5_off_timer += G2_BIT5_TICKS;
/* bit 4 rises when bit 5 falls, hence G2_BIT5_TICKS here */
154 if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
155 g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
158 if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
159 g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
161 g2_state.bit4_off_timer += G2_BIT4_TICKS;
164 if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
165 g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
168 if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
169 g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
171 g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
/* Bit 5 (input FIFO) goes high immediately on the write */
174 MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
/* Bring G2STATUS up to date at time `nanos` within the current slice: any
 * timer whose deadline has passed is retired (-1) and the corresponding
 * status bit updated in `val` (the bit set/clear statements are elided in
 * this view — confirm against the full source).  The unsigned casts make an
 * inactive timer (-1) compare as a huge value, i.e. "not yet due".
 * Writes the updated status back and (presumably) returns it. */
177 static uint32_t g2_update_fifo_status( uint32_t nanos )
179 uint32_t val = MMIO_READ( ASIC, G2STATUS );
180 if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
182 g2_state.bit5_off_timer = -1;
184 if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
186 g2_state.bit4_on_timer = -1;
188 if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
190 g2_state.bit4_off_timer = -1;
193 if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
195 g2_state.bit0_on_timer = -1;
197 if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
199 g2_state.bit0_off_timer = -1;
202 MMIO_WRITE( ASIC, G2STATUS, val );
/* Read G2STATUS, first refreshing the FIFO bits to the current slice time. */
206 static int g2_read_status() {
207 return g2_update_fifo_status( sh4r.slice_cycle );
/* Raise ASIC event `event` (0..95).  Events are grouped 32 per status word;
 * (event&0x60)>>3 converts the word index into a byte offset between the
 * PIRQ0/1/2 register banks.  The pending bit is set, then each of the three
 * interrupt-priority mask banks (A/B/C) that matches raises its SH4 IRQ
 * line.  Events in words 1 and 2 additionally raise the cascade bits in
 * word 0 so software polling PIRQ0 sees them. */
211 void asic_event( int event )
213 int offset = ((event&0x60)>>3);
/* MMIO_READ yields an lvalue here: set the pending bit in place */
214 int result = (MMIO_READ(ASIC, PIRQ0 + offset)) |= (1<<(event&0x1F));
216 if( result & MMIO_READ(ASIC, IRQA0 + offset) )
217 intc_raise_interrupt( INT_IRQ13 );
218 if( result & MMIO_READ(ASIC, IRQB0 + offset) )
219 intc_raise_interrupt( INT_IRQ11 );
220 if( result & MMIO_READ(ASIC, IRQC0 + offset) )
221 intc_raise_interrupt( INT_IRQ9 );
223 if( event >= 64 ) { /* Third word */
224 asic_event( EVENT_CASCADE2 );
225 } else if( event >= 32 ) { /* Second word */
226 asic_event( EVENT_CASCADE1 );
/* Clear pending ASIC event `event`: drop its bit from the appropriate PIRQ
 * word, clear the matching cascade bit in PIRQ0 (bit 31 for word 2, bit 30
 * for word 1), then re-evaluate which SH4 interrupt lines should remain
 * asserted. */
230 void asic_clear_event( int event ) {
231 int offset = ((event&0x60)>>3);
232 uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset) & (~(1<<(event&0x1F)));
233 MMIO_WRITE( ASIC, PIRQ0 + offset, result );
235 /* clear cascades if necessary */
237 MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
238 } else if( event >= 32 ) {
239 MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
243 asic_check_cleared_events();
/* Recompute the three SH4 interrupt lines from scratch: OR each pending
 * word against each priority mask bank, and clear any line with no
 * remaining source.  Called after events are cleared or masks rewritten. */
246 void asic_check_cleared_events( )
248 int i, setA = 0, setB = 0, setC = 0;
250 for( i=0; i<3; i++ ) {
251 bits = MMIO_READ( ASIC, PIRQ0 + i );
252 setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
253 setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
254 setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
257 intc_clear_interrupt( INT_IRQ13 );
259 intc_clear_interrupt( INT_IRQ11 );
261 intc_clear_interrupt( INT_IRQ9 );
/* Execute a G2 DMA transfer on `channel` (0..3) if both control registers
 * are armed.  Each channel's register bank is 0x20 bytes apart, hence
 * offset = channel<<5.  The transfer is performed synchronously as a copy
 * through an intermediate buffer, CTL2 is cleared on completion and the
 * per-channel DMA event raised.
 * NOTE(review): `buf[length]` is a VLA sized by a guest-controlled value up
 * to 0x1FFFFFFF bytes — this can overflow the host stack; consider a heap
 * buffer or chunked copy. */
264 void g2_dma_transfer( int channel )
266 uint32_t offset = channel << 5;
268 if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
269 if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
270 uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
271 uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
272 uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
273 uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
274 // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
275 unsigned char buf[length];
276 if( dir == 0 ) { /* SH4 to device */
277 mem_copy_from_sh4( buf, sh4addr, length );
278 mem_copy_to_sh4( extaddr, buf, length );
279 } else { /* Device to SH4 */
280 mem_copy_from_sh4( buf, extaddr, length );
281 mem_copy_to_sh4( sh4addr, buf, length );
/* transfer complete: disarm CTL2 and signal the channel's event */
283 MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
284 asic_event( EVENT_G2_DMA0 + channel );
/* CTL1 not armed — cancel any pending CTL2 request */
286 MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
/* Execute an IDE (GD-ROM) DMA transfer if both CTL registers are armed:
 * reset the transferred-size counter, pull data from the IDE device into
 * SH4 memory, record the actual byte count, then disarm CTL2.  (Only the
 * read direction is implemented; IDEDMADIR is read but commented out.) */
291 void asic_ide_dma_transfer( )
293 if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
294 if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
295 MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );
297 uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
298 uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
299 // int dir = MMIO_READ( EXTDMA, IDEDMADIR );
301 uint32_t xfer = ide_read_data_dma( addr, length );
302 MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
303 MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
/* CTL1 not armed — cancel the request */
305 MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
/* Execute a PVR DMA transfer: drain `count` bytes from SH4 DMAC channel 2
 * into PVR memory at the (32-byte aligned) destination address.  Control
 * and count registers are cleared afterwards; for texture-RAM destinations
 * (bit 24 set) the destination pointer is advanced past the written data.
 * Completion raises EVENT_PVR_DMA.
 * NOTE(review): alloca(count) with a guest-controlled count risks stack
 * overflow — consider a bounded heap buffer. */
310 void pvr_dma_transfer( )
312 sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
313 uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
314 unsigned char *data = alloca( count );
315 uint32_t rcount = DMAC_get_buffer( 2, data, count );
316 if( rcount != count )
317 WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );
319 pvr2_dma_write( destaddr, data, rcount );
321 MMIO_WRITE( ASIC, PVRDMACTL, 0 );
322 MMIO_WRITE( ASIC, PVRDMACNT, 0 );
323 if( destaddr & 0x01000000 ) { /* Write to texture RAM */
324 MMIO_WRITE( ASIC, PVRDMADEST, destaddr + rcount );
326 asic_event( EVENT_PVR_DMA );
/* MMIO write handler for the ASIC register page.  Dispatches on `reg`
 * (most case labels are elided in this view — confirm against the full
 * source).  Visible behaviours: write-1-to-clear semantics for the event
 * status words, the SYSRESET magic value 0x7611, Maple DMA kick-off, and
 * address registers stored with their reserved bits masked off. */
329 void mmio_region_ASIC_write( uint32_t reg, uint32_t val )
333 break; /* Treat this as read-only for the moment */
/* Event status word: writing 1-bits clears those pending events */
335 val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
336 MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
337 asic_check_cleared_events();
340 /* Clear any events */
341 val = MMIO_READ(ASIC, reg)&(~val);
342 MMIO_WRITE( ASIC, reg, val );
343 if( val == 0 ) { /* all clear - clear the cascade bit */
344 MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
346 asic_check_cleared_events();
/* SYSRESET accepts only the documented magic value */
349 if( val == 0x7611 ) {
352 WARN( "Unknown value %08X written to SYSRESET port", val );
356 MMIO_WRITE( ASIC, reg, val );
/* Kick off a Maple bus DMA: buffer address is 32-byte aligned */
358 uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA) &0x1FFFFFE0;
359 maple_handle_buffer( maple_addr );
360 MMIO_WRITE( ASIC, reg, 0 );
/* Address registers: mask reserved bits, force the fixed base bit */
364 MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
367 MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
369 case PVRDMACTL: /* Initiate PVR DMA transfer */
371 MMIO_WRITE( ASIC, reg, val );
378 MMIO_WRITE( ASIC, reg, val );
381 MMIO_WRITE( ASIC, reg, val );
/* MMIO read handler for the ASIC register page.  G2STATUS is synthesized
 * from the FIFO timers via g2_read_status(); other registers return the
 * stored value (case labels elided in this view). */
385 int32_t mmio_region_ASIC_read( uint32_t reg )
402 val = MMIO_READ(ASIC, reg);
405 return g2_read_status();
407 val = MMIO_READ(ASIC, reg);
/* MMIO write handler for the EXTDMA page: the memory-mapped IDE (GD-ROM)
 * register file plus the G2/IDE DMA channel control registers.  IDE
 * registers are inaccessible until the interface has been unlocked (see the
 * magic values below); taskfile registers are further gated by
 * ide_can_write_regs() so they cannot be clobbered mid-command.  Writing 1
 * to a channel's CTL2 register triggers the corresponding DMA transfer. */
413 MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
415 if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
416 return; /* disabled */
420 case IDEALTSTATUS: /* Device control */
421 ide_write_control( val );
424 ide_write_data_pio( val );
/* IDE taskfile registers: writable only between commands */
427 if( ide_can_write_regs() )
428 idereg.feature = (uint8_t)val;
431 if( ide_can_write_regs() )
432 idereg.count = (uint8_t)val;
435 if( ide_can_write_regs() )
436 idereg.lba0 = (uint8_t)val;
439 if( ide_can_write_regs() )
440 idereg.lba1 = (uint8_t)val;
443 if( ide_can_write_regs() )
444 idereg.lba2 = (uint8_t)val;
447 if( ide_can_write_regs() )
448 idereg.device = (uint8_t)val;
/* NOP is always accepted, even while a command is in flight */
451 if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
452 ide_write_command( (uint8_t)val );
/* DMA address/size registers: store with reserved bits masked off */
456 MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 );
459 MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
462 MMIO_WRITE( EXTDMA, reg, val & 1 );
/* Arming CTL2 kicks the IDE DMA immediately */
466 MMIO_WRITE( EXTDMA, reg, val & 0x01 );
467 asic_ide_dma_transfer( );
/* IDE interface unlock/lock magic values */
470 if( val == 0x001FFFFF ) {
471 idereg.interface_enabled = TRUE;
472 /* Conventional wisdom says that this is necessary but not
473 * sufficient to enable the IDE interface.
475 } else if( val == 0x000042FE ) {
476 idereg.interface_enabled = FALSE;
479 case G2DMA0EXT: case G2DMA0SH4: case G2DMA0SIZ:
480 case G2DMA1EXT: case G2DMA1SH4: case G2DMA1SIZ:
481 case G2DMA2EXT: case G2DMA2SH4: case G2DMA2SIZ:
482 case G2DMA3EXT: case G2DMA3SH4: case G2DMA3SIZ:
483 MMIO_WRITE( EXTDMA, reg, val & 0x9FFFFFE0 );
485 case G2DMA0MOD: case G2DMA1MOD: case G2DMA2MOD: case G2DMA3MOD:
486 MMIO_WRITE( EXTDMA, reg, val & 0x07 );
488 case G2DMA0DIR: case G2DMA1DIR: case G2DMA2DIR: case G2DMA3DIR:
489 MMIO_WRITE( EXTDMA, reg, val & 0x01 );
/* Per-channel CTL2 writes: store the arm bit then run the transfer */
493 MMIO_WRITE( EXTDMA, reg, val & 1);
494 g2_dma_transfer( 0 );
497 MMIO_WRITE( EXTDMA, reg, val & 0x37 );
501 MMIO_WRITE( EXTDMA, reg, val & 1);
502 g2_dma_transfer( 1 );
506 MMIO_WRITE( EXTDMA, reg, val & 0x37 );
510 MMIO_WRITE( EXTDMA, reg, val &1 );
511 g2_dma_transfer( 2 );
514 MMIO_WRITE( EXTDMA, reg, val & 0x37 );
518 MMIO_WRITE( EXTDMA, reg, val &1 );
519 g2_dma_transfer( 3 );
522 MMIO_WRITE( EXTDMA, reg, val & 0x37 );
527 ERROR( "Write to unimplemented DMA control register %08X", reg );
531 MMIO_WRITE( EXTDMA, reg, val );
535 MMIO_REGION_READ_FN( EXTDMA, reg )
538 if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
539 return 0xFFFFFFFF; /* disabled */
546 case IDEDATA: return ide_read_data_pio( );
547 case IDEFEAT: return idereg.error;
548 case IDECOUNT:return idereg.count;
549 case IDELBA0: return ide_get_drive_status();
550 case IDELBA1: return idereg.lba1;
551 case IDELBA2: return idereg.lba2;
552 case IDEDEV: return idereg.device;
554 val = ide_read_status();
557 val = MMIO_READ( EXTDMA, reg );
.