lxdream.org :: lxdream/src/asic.c
filename src/asic.c
changeset 342:850502f0e8de
prev 334:59073f812bd3
next 422:61a0598e07ff
author nkeynes
date Wed Jan 31 11:01:36 2007 +0000
permissions -rw-r--r--
last change use ide.c in testregs
/**
 * $Id: asic.c,v 1.28 2007-01-31 10:58:42 nkeynes Exp $
 *
 * Support for the miscellaneous ASIC functions (primarily event multiplexing
 * and DMA).
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define MODULE asic_module

#include <assert.h>
#include <stdlib.h>
#include "dream.h"
#include "mem.h"
#include "sh4/intc.h"
#include "sh4/dmac.h"
#include "dreamcast.h"
#include "maple/maple.h"
#include "gdrom/ide.h"
#include "asic.h"
#define MMIO_IMPL
#include "asic.h"
/*
 * Open questions:
 *   1) Does changing the mask after an event occurs result in the
 *      interrupt being delivered immediately?
 * TODO: Logic diagram of ASIC event/interrupt logic.
 *
 * ... don't even get me started on the "EXTDMA" page, about which, apparently,
 * practically nothing is publicly known...
 */

static void asic_check_cleared_events( void );
static void asic_init( void );
static void asic_reset( void );
static uint32_t asic_run_slice( uint32_t nanosecs );
static void asic_save_state( FILE *f );
static int asic_load_state( FILE *f );
static uint32_t g2_update_fifo_status( uint32_t slice_cycle );

struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
                                        NULL, asic_save_state, asic_load_state };

#define G2_BIT5_TICKS 60
#define G2_BIT4_TICKS 160
#define G2_BIT0_ON_TICKS 120
#define G2_BIT0_OFF_TICKS 420

struct asic_g2_state {
    int bit5_off_timer;
    int bit4_on_timer;
    int bit4_off_timer;
    int bit0_on_timer;
    int bit0_off_timer;
};

static struct asic_g2_state g2_state;
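
/* Run the ASIC module for one time slice: fold any G2 FIFO status changes
 * falling due within the slice into G2STATUS, then advance (or expire) the
 * remaining timers by the slice length. */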
static uint32_t asic_run_slice( uint32_t nanosecs )
{
    g2_update_fifo_status(nanosecs);
    if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
        g2_state.bit5_off_timer = -1;
    } else {
        g2_state.bit5_off_timer -= nanosecs;
    }

    if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
        g2_state.bit4_off_timer = -1;
    } else {
        g2_state.bit4_off_timer -= nanosecs;
    }
    if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
        g2_state.bit4_on_timer = -1;
    } else {
        g2_state.bit4_on_timer -= nanosecs;
    }

    if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
        g2_state.bit0_off_timer = -1;
    } else {
        g2_state.bit0_off_timer -= nanosecs;
    }
    if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
        g2_state.bit0_on_timer = -1;
    } else {
        g2_state.bit0_on_timer -= nanosecs;
    }

    return nanosecs;
}

static void asic_init( void )
{
    register_io_region( &mmio_region_ASIC );
    register_io_region( &mmio_region_EXTDMA );
    asic_reset();
}

static void asic_reset( void )
{
    memset( &g2_state, 0xFF, sizeof(g2_state) );
}

static void asic_save_state( FILE *f )
{
    fwrite( &g2_state, sizeof(g2_state), 1, f );
}

static int asic_load_state( FILE *f )
{
    if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
        return 1;
    else
        return 0;
}

/**
 * Set up the timers for the 3 FIFO status bits following a write through the G2
 * bus from the SH4 side. The timing is roughly as follows (times are
 * approximate, based on software readings - I wouldn't take this as gospel,
 * but it seems to be enough to fool most programs):
 *    0ns: Bit 5 (Input fifo?) goes high immediately on the write
 *   40ns: Bit 5 goes low and bit 4 goes high
 *  120ns: Bit 4 goes low, bit 0 goes high
 *  240ns: Bit 0 goes low.
 *
 * Additional writes while the FIFO is in operation extend the time that the
 * bits remain high, as one might expect, without altering the time at which
 * they initially go high.
 */
void asic_g2_write_word()
{
    if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    } else {
        g2_state.bit5_off_timer += G2_BIT5_TICKS;
    }

    if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    }

    if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
    } else {
        g2_state.bit4_off_timer += G2_BIT4_TICKS;
    }

    if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
    }

    if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
    } else {
        g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
    }

    MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
}
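
/* Apply any G2 FIFO timer transitions that have fallen due by the given point
 * (in nanoseconds) within the current time slice, updating bits 5, 4 and 0 of
 * G2STATUS accordingly, and return the resulting status value. */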
static uint32_t g2_update_fifo_status( uint32_t nanos )
{
    uint32_t val = MMIO_READ( ASIC, G2STATUS );
    if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
        val = val & (~0x20);
        g2_state.bit5_off_timer = -1;
    }
    if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
        val = val | 0x10;
        g2_state.bit4_on_timer = -1;
    }
    if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
        val = val & (~0x10);
        g2_state.bit4_off_timer = -1;
    }

    if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
        val = val | 0x01;
        g2_state.bit0_on_timer = -1;
    }
    if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
        val = val & (~0x01);
        g2_state.bit0_off_timer = -1;
    }

    MMIO_WRITE( ASIC, G2STATUS, val );
    return val;
}

static int g2_read_status() {
    return g2_update_fifo_status( sh4r.slice_cycle );
}
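
/* Raise an ASIC event: set the event's bit in the corresponding PIRQ status
 * word and assert IRQ 13, 11 and/or 9 on the SH4 according to which of the
 * IRQA/B/C masks include it. Events in the second and third status words also
 * raise the matching cascade event in the first word. */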
void asic_event( int event )
{
    int offset = ((event&0x60)>>3);
    int result = (MMIO_READ(ASIC, PIRQ0 + offset)) |= (1<<(event&0x1F));

    if( result & MMIO_READ(ASIC, IRQA0 + offset) )
        intc_raise_interrupt( INT_IRQ13 );
    if( result & MMIO_READ(ASIC, IRQB0 + offset) )
        intc_raise_interrupt( INT_IRQ11 );
    if( result & MMIO_READ(ASIC, IRQC0 + offset) )
        intc_raise_interrupt( INT_IRQ9 );

    if( event >= 64 ) { /* Third word */
        asic_event( EVENT_CASCADE2 );
    } else if( event >= 32 ) { /* Second word */
        asic_event( EVENT_CASCADE1 );
    }
}
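
/* Clear an ASIC event: drop the event's bit from its PIRQ status word, clear
 * the cascade bit in PIRQ0 if the whole word is now empty, and lower any
 * interrupt lines that no longer have a flagged event behind them. */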
void asic_clear_event( int event ) {
    int offset = ((event&0x60)>>3);
    uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset) & (~(1<<(event&0x1F)));
    MMIO_WRITE( ASIC, PIRQ0 + offset, result );
    if( result == 0 ) {
        /* clear cascades if necessary */
        if( event >= 64 ) {
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        } else if( event >= 32 ) {
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
        }
    }

    asic_check_cleared_events();
}
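
/* Re-check the three event status words against the IRQA/B/C masks and clear
 * IRQ 13, 11 or 9 on the SH4 whenever no flagged event remains routed to that
 * line. */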
void asic_check_cleared_events( )
{
    int i, setA = 0, setB = 0, setC = 0;
    uint32_t bits;
    for( i=0; i<12; i+=4 ) { /* status/mask registers are 4 bytes apart */
        bits = MMIO_READ( ASIC, PIRQ0 + i );
        setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
        setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
        setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
    }
    if( setA == 0 )
        intc_clear_interrupt( INT_IRQ13 );
    if( setB == 0 )
        intc_clear_interrupt( INT_IRQ11 );
    if( setC == 0 )
        intc_clear_interrupt( INT_IRQ9 );
}
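
/* Execute a G2 DMA transfer on the given channel (0..3) if both of the
 * channel's control registers are set to 1: copy between the SH4 address and
 * the external G2 address in the direction given by the DIR register, clear
 * CTL2, and raise the channel's completion event. */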
void g2_dma_transfer( int channel )
{
    uint32_t offset = channel << 5;

    if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
        if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
            uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
            uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
            uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
            uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
            uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
            char buf[length];
            if( dir == 0 ) { /* SH4 to device */
                mem_copy_from_sh4( buf, sh4addr, length );
                mem_copy_to_sh4( extaddr, buf, length );
            } else { /* Device to SH4 */
                mem_copy_from_sh4( buf, extaddr, length );
                mem_copy_to_sh4( sh4addr, buf, length );
            }
            MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
            asic_event( EVENT_G2_DMA0 + channel );
        } else {
            MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
        }
    }
}
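
/* Execute the IDE (GD-ROM) DMA transfer if both IDEDMACTL registers are set
 * to 1: read up to IDEDMASIZ bytes from the drive into SH4 memory at
 * IDEDMASH4, record the actual transfer count in IDEDMATXSIZ, and clear
 * IDEDMACTL2. Only the device-to-SH4 direction is handled here, although the
 * direction register is read. */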
void asic_ide_dma_transfer( )
{
    if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
        if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
            MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );

            uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
            uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
            int dir = MMIO_READ( EXTDMA, IDEDMADIR );

            uint32_t xfer = ide_read_data_dma( addr, length );
            MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
            MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
        } else { /* 0 */
            MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
        }
    }
}
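
/* Execute a PVR DMA transfer: pull up to PVRDMACNT bytes from SH4 DMAC
 * channel 2, write them to the PVR2 at PVRDMADEST, reset the control and
 * count registers (advancing the destination for texture RAM writes), and
 * raise the PVR DMA completion event. */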
void pvr_dma_transfer( )
{
    sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST ) & 0x1FFFFFE0;
    uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
    char *data = alloca( count );
    uint32_t rcount = DMAC_get_buffer( 2, data, count );
    if( rcount != count )
        WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );

    pvr2_dma_write( destaddr, data, rcount );

    MMIO_WRITE( ASIC, PVRDMACTL, 0 );
    MMIO_WRITE( ASIC, PVRDMACNT, 0 );
    if( destaddr & 0x01000000 ) { /* Write to texture RAM */
        MMIO_WRITE( ASIC, PVRDMADEST, destaddr + rcount );
    }
    asic_event( EVENT_PVR_DMA );
}
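
/* Handle a write to one of the ASIC's own registers. Writes to PIRQ0 and
 * PIRQ2 acknowledge (clear) event bits, SYSRESET triggers a full machine
 * reset when the magic value 0x7611 is written, MAPLE_STATE kicks off Maple
 * bus processing, and PVRDMACTL initiates a PVR DMA transfer. */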
void mmio_region_ASIC_write( uint32_t reg, uint32_t val )
{
    switch( reg ) {
    case PIRQ1:
        break; /* Treat this as read-only for the moment */
    case PIRQ0:
        val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
        MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
        asic_check_cleared_events();
        break;
    case PIRQ2:
        /* Clear any events */
        val = MMIO_READ(ASIC, reg)&(~val);
        MMIO_WRITE( ASIC, reg, val );
        if( val == 0 ) { /* all clear - clear the cascade bit */
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        }
        asic_check_cleared_events();
        break;
    case SYSRESET:
        if( val == 0x7611 ) {
            dreamcast_reset();
            sh4r.new_pc = sh4r.pc;
        } else {
            WARN( "Unknown value %08X written to SYSRESET port", val );
        }
        break;
    case MAPLE_STATE:
        MMIO_WRITE( ASIC, reg, val );
        if( val & 1 ) {
            uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA ) & 0x1FFFFFE0;
            maple_handle_buffer( maple_addr );
            MMIO_WRITE( ASIC, reg, 0 );
        }
        break;
    case PVRDMADEST:
        MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
        break;
    case PVRDMACNT:
        MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
        break;
    case PVRDMACTL: /* Initiate PVR DMA transfer */
        val = val & 0x01;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            pvr_dma_transfer();
        }
        break;
    case MAPLE_DMA:
        MMIO_WRITE( ASIC, reg, val );
        break;
    default:
        MMIO_WRITE( ASIC, reg, val );
    }
}
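
/* Handle a read from one of the ASIC's own registers. Most registers simply
 * return their stored value; G2STATUS is recomputed on the fly from the FIFO
 * timers. */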
int32_t mmio_region_ASIC_read( uint32_t reg )
{
    int32_t val;
    switch( reg ) {
        /*
        case 0x89C:
            sh4_stop();
            return 0x000000B;
        */
    case PIRQ0:
    case PIRQ1:
    case PIRQ2:
    case IRQA0:
    case IRQA1:
    case IRQA2:
    case IRQB0:
    case IRQB1:
    case IRQB2:
    case IRQC0:
    case IRQC1:
    case IRQC2:
    case MAPLE_STATE:
        val = MMIO_READ(ASIC, reg);
        return val;
    case G2STATUS:
        return g2_read_status();
    default:
        val = MMIO_READ(ASIC, reg);
        return val;
    }

}
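
/* Handle a write to the EXTDMA register page. IDE register writes are
 * forwarded to the GD-ROM IDE emulation (and ignored while the interface is
 * disabled); writes to the various DMA control registers kick off the
 * corresponding IDE or G2 transfers. */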
MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
{
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return; /* disabled */
    }

    switch( reg ) {
    case IDEALTSTATUS: /* Device control */
        ide_write_control( val );
        break;
    case IDEDATA:
        ide_write_data_pio( val );
        break;
    case IDEFEAT:
        if( ide_can_write_regs() )
            idereg.feature = (uint8_t)val;
        break;
    case IDECOUNT:
        if( ide_can_write_regs() )
            idereg.count = (uint8_t)val;
        break;
    case IDELBA0:
        if( ide_can_write_regs() )
            idereg.lba0 = (uint8_t)val;
        break;
    case IDELBA1:
        if( ide_can_write_regs() )
            idereg.lba1 = (uint8_t)val;
        break;
    case IDELBA2:
        if( ide_can_write_regs() )
            idereg.lba2 = (uint8_t)val;
        break;
    case IDEDEV:
        if( ide_can_write_regs() )
            idereg.device = (uint8_t)val;
        break;
    case IDECMD:
        if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
            ide_write_command( (uint8_t)val );
        }
        break;
    case IDEDMASH4:
        MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 );
        break;
    case IDEDMASIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
        break;
    case IDEDMACTL1:
    case IDEDMACTL2:
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        asic_ide_dma_transfer( );
        break;
    case IDEACTIVATE:
        if( val == 0x001FFFFF ) {
            idereg.interface_enabled = TRUE;
            /* Conventional wisdom says that this is necessary but not
             * sufficient to enable the IDE interface.
             */
        } else if( val == 0x000042FE ) {
            idereg.interface_enabled = FALSE;
        }
        break;
    case G2DMA0CTL1:
    case G2DMA0CTL2:
        MMIO_WRITE( EXTDMA, reg, val );
        g2_dma_transfer( 0 );
        break;
    case G2DMA0STOP:
        break;
    case G2DMA1CTL1:
    case G2DMA1CTL2:
        MMIO_WRITE( EXTDMA, reg, val );
        g2_dma_transfer( 1 );
        break;
    case G2DMA1STOP:
        break;
    case G2DMA2CTL1:
    case G2DMA2CTL2:
        MMIO_WRITE( EXTDMA, reg, val );
        g2_dma_transfer( 2 );
        break;
    case G2DMA2STOP:
        break;
    case G2DMA3CTL1:
    case G2DMA3CTL2:
        MMIO_WRITE( EXTDMA, reg, val );
        g2_dma_transfer( 3 );
        break;
    case G2DMA3STOP:
        break;
    case PVRDMA2CTL1:
    case PVRDMA2CTL2:
        if( val != 0 ) {
            ERROR( "Write to unimplemented DMA control register %08X", reg );
            //dreamcast_stop();
            //sh4_stop();
        }
        break;
    default:
        MMIO_WRITE( EXTDMA, reg, val );
    }
}
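
/* Handle a read from the EXTDMA register page. IDE registers reflect the
 * current state of the GD-ROM IDE emulation (or return 0xFFFFFFFF while the
 * interface is disabled); everything else reads back its stored value. */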
MMIO_REGION_READ_FN( EXTDMA, reg )
{
    uint32_t val;
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return 0xFFFFFFFF; /* disabled */
    }

    switch( reg ) {
    case IDEALTSTATUS:
        val = idereg.status;
        return val;
    case IDEDATA: return ide_read_data_pio( );
    case IDEFEAT: return idereg.error;
    case IDECOUNT: return idereg.count;
    case IDELBA0: return ide_get_drive_status();
    case IDELBA1: return idereg.lba1;
    case IDELBA2: return idereg.lba2;
    case IDEDEV: return idereg.device;
    case IDECMD:
        val = ide_read_status();
        return val;
    default:
        val = MMIO_READ( EXTDMA, reg );
        return val;
    }
}