Search
lxdream.org :: lxdream/src/asic.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/asic.c
changeset 305:1191085c5988
prev302:96b5cc24309c
next325:5717ae5d4746
author nkeynes
date Thu Jan 18 11:14:01 2007 +0000 (14 years ago)
permissions -rw-r--r--
last change Rearrange asic cascade events
view annotate diff log raw
     1 /**
     2  * $Id: asic.c,v 1.25 2007-01-18 11:14:01 nkeynes Exp $
     3  *
     4  * Support for the miscellaneous ASIC functions (Primarily event multiplexing,
     5  * and DMA). 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #define MODULE asic_module
    22 #include <assert.h>
    23 #include <stdlib.h>
    24 #include "dream.h"
    25 #include "mem.h"
    26 #include "sh4/intc.h"
    27 #include "sh4/dmac.h"
    28 #include "dreamcast.h"
    29 #include "maple/maple.h"
    30 #include "gdrom/ide.h"
    31 #include "asic.h"
    32 #define MMIO_IMPL
    33 #include "asic.h"
    34 /*
    35  * Open questions:
    36  *   1) Does changing the mask after event occurance result in the
    37  *      interrupt being delivered immediately?
    38  * TODO: Logic diagram of ASIC event/interrupt logic.
    39  *
    40  * ... don't even get me started on the "EXTDMA" page, about which, apparently,
    41  * practically nothing is publicly known...
    42  */
/* Forward declarations: dreamcast_module callbacks (referenced by the
 * asic_module descriptor below) plus the lazy G2 FIFO status updater. */
static void asic_check_cleared_events( void );
static void asic_init( void );
static void asic_reset( void );
static uint32_t asic_run_slice( uint32_t nanosecs );
static void asic_save_state( FILE *f );
static int asic_load_state( FILE *f );
static uint32_t g2_update_fifo_status( uint32_t slice_cycle );
/* Module descriptor registered with the dreamcast core: init, reset,
 * run-slice and save/load state callbacks (no start/stop hooks). */
struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
					NULL, asic_save_state, asic_load_state };
/* Durations (in ns) of the G2STATUS FIFO bit phases - see
 * asic_g2_write_word() for how they combine. Approximate values derived
 * from software readings. */
#define G2_BIT5_TICKS 60
#define G2_BIT4_TICKS 160
#define G2_BIT0_ON_TICKS 120
#define G2_BIT0_OFF_TICKS 420

/* Deadlines (ns, relative to the current time slice) at which each
 * G2STATUS bit toggles. -1 means the timer is inactive. */
struct asic_g2_state {
    int bit5_off_timer;
    int bit4_on_timer;
    int bit4_off_timer;
    int bit0_on_timer;
    int bit0_off_timer;
};

static struct asic_g2_state g2_state;
    70 static uint32_t asic_run_slice( uint32_t nanosecs )
    71 {
    72     g2_update_fifo_status(nanosecs);
    73     if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
    74 	g2_state.bit5_off_timer = -1;
    75     } else {
    76 	g2_state.bit5_off_timer -= nanosecs;
    77     }
    79     if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
    80 	g2_state.bit4_off_timer = -1;
    81     } else {
    82 	g2_state.bit4_off_timer -= nanosecs;
    83     }
    84     if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
    85 	g2_state.bit4_on_timer = -1;
    86     } else {
    87 	g2_state.bit4_on_timer -= nanosecs;
    88     }
    90     if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
    91 	g2_state.bit0_off_timer = -1;
    92     } else {
    93 	g2_state.bit0_off_timer -= nanosecs;
    94     }
    95     if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
    96 	g2_state.bit0_on_timer = -1;
    97     } else {
    98 	g2_state.bit0_on_timer -= nanosecs;
    99     }
   101     return nanosecs;
   102 }
/**
 * One-time module initialisation: register both MMIO register banks with
 * the memory subsystem, then reset to establish the initial state.
 */
static void asic_init( void )
{
    register_io_region( &mmio_region_ASIC );
    register_io_region( &mmio_region_EXTDMA );
    asic_reset();
}
   111 static void asic_reset( void )
   112 {
   113     memset( &g2_state, 0xFF, sizeof(g2_state) );
   114 }    
/**
 * Append the ASIC state (just the G2 FIFO timer block) to the save file.
 * NOTE(review): the fwrite result is not checked - a short write would
 * silently produce a truncated save file.
 */
static void asic_save_state( FILE *f )
{
    fwrite( &g2_state, sizeof(g2_state), 1, f );
}
   121 static int asic_load_state( FILE *f )
   122 {
   123     if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
   124 	return 1;
   125     else
   126 	return 0;
   127 }
   130 /**
   131  * Setup the timers for the 3 FIFO status bits following a write through the G2
   132  * bus from the SH4 side. The timing is roughly as follows: (times are
   133  * approximate based on software readings - I wouldn't take this as gospel but
   134  * it seems to be enough to fool most programs). 
   135  *    0ns: Bit 5 (Input fifo?) goes high immediately on the write
   136  *   40ns: Bit 5 goes low and bit 4 goes high
   137  *  120ns: Bit 4 goes low, bit 0 goes high
   138  *  240ns: Bit 0 goes low.
   139  *
   140  * Additional writes while the FIFO is in operation extend the time that the
   141  * bits remain high as one might expect, without altering the time at which
   142  * they initially go high.
   143  */
   144 void asic_g2_write_word()
   145 {
   146     if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
   147 	g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
   148     } else {
   149 	g2_state.bit5_off_timer += G2_BIT5_TICKS;
   150     }
   152     if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
   153 	g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
   154     }
   156     if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
   157 	g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
   158     } else {
   159 	g2_state.bit4_off_timer += G2_BIT4_TICKS;
   160     }
   162     if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
   163 	g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
   164     }
   166     if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
   167 	g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
   168     } else {
   169 	g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
   170     }
   172     MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
   173 }
   175 static uint32_t g2_update_fifo_status( uint32_t nanos )
   176 {
   177     uint32_t val = MMIO_READ( ASIC, G2STATUS );
   178     if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
   179 	val = val & (~0x20);
   180 	g2_state.bit5_off_timer = -1;
   181     }
   182     if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
   183 	val = val | 0x10;
   184 	g2_state.bit4_on_timer = -1;
   185     }
   186     if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
   187 	val = val & (~0x10);
   188 	g2_state.bit4_off_timer = -1;
   189     } 
   191     if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
   192 	val = val | 0x01;
   193 	g2_state.bit0_on_timer = -1;
   194     }
   195     if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
   196 	val = val & (~0x01);
   197 	g2_state.bit0_off_timer = -1;
   198     } 
   200     MMIO_WRITE( ASIC, G2STATUS, val );
   201     return val;
   202 }   
/* Read handler for G2STATUS: fold any already-elapsed FIFO timers into
 * the register before returning it, so reads within a slice are current. */
static int g2_read_status() {
    return g2_update_fifo_status( sh4r.slice_cycle );
}
   209 void asic_event( int event )
   210 {
   211     int offset = ((event&0x60)>>3);
   212     int result = (MMIO_READ(ASIC, PIRQ0 + offset))  |=  (1<<(event&0x1F));
   214     if( result & MMIO_READ(ASIC, IRQA0 + offset) )
   215         intc_raise_interrupt( INT_IRQ13 );
   216     if( result & MMIO_READ(ASIC, IRQB0 + offset) )
   217         intc_raise_interrupt( INT_IRQ11 );
   218     if( result & MMIO_READ(ASIC, IRQC0 + offset) )
   219         intc_raise_interrupt( INT_IRQ9 );
   221     if( event >= 64 ) { /* Third word */
   222 	asic_event( EVENT_CASCADE2 );
   223     } else if( event >= 32 ) { /* Second word */
   224 	asic_event( EVENT_CASCADE1 );
   225     }
   226 }
   228 void asic_clear_event( int event ) {
   229     int offset = ((event&0x60)>>3);
   230     uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset)  & (~(1<<(event&0x1F)));
   231     MMIO_WRITE( ASIC, PIRQ0 + offset, result );
   232     if( result == 0 ) {
   233 	/* clear cascades if necessary */
   234 	if( event >= 64 ) {
   235 	    MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
   236 	} else if( event >= 32 ) {
   237 	    MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
   238 	}
   239     }
   241     asic_check_cleared_events();
   242 }
   244 void asic_check_cleared_events( )
   245 {
   246     int i, setA = 0, setB = 0, setC = 0;
   247     uint32_t bits;
   248     for( i=0; i<3; i++ ) {
   249 	bits = MMIO_READ( ASIC, PIRQ0 + i );
   250 	setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   251 	setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   252 	setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   253     }
   254     if( setA == 0 )
   255 	intc_clear_interrupt( INT_IRQ13 );
   256     if( setB == 0 )
   257 	intc_clear_interrupt( INT_IRQ11 );
   258     if( setC == 0 )
   259 	intc_clear_interrupt( INT_IRQ9 );
   260 }
   262 void g2_dma_transfer( int channel )
   263 {
   264     uint32_t offset = channel << 5;
   266     if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
   267 	if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
   268 	    uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
   269 	    uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
   270 	    uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
   271 	    uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
   272 	    uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
   273 	    char buf[length];
   274 	    if( dir == 0 ) { /* SH4 to device */
   275 		mem_copy_from_sh4( buf, sh4addr, length );
   276 		mem_copy_to_sh4( extaddr, buf, length );
   277 	    } else { /* Device to SH4 */
   278 		mem_copy_from_sh4( buf, extaddr, length );
   279 		mem_copy_to_sh4( sh4addr, buf, length );
   280 	    }
   281 	    MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   282 	    asic_event( EVENT_G2_DMA0 + channel );
   283 	} else {
   284 	    MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   285 	}
   286     }
   287 }
   289 void asic_ide_dma_transfer( )
   290 {	
   291     if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
   292 	if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
   293 	    MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );
   295 	    uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
   296 	    uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
   297 	    int dir = MMIO_READ( EXTDMA, IDEDMADIR );
   299 	    uint32_t xfer = ide_read_data_dma( addr, length );
   300 	    MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
   301 	    MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   302 	} else { /* 0 */
   303 	    MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   304 	}
   305     }
   307 }
   310 void mmio_region_ASIC_write( uint32_t reg, uint32_t val )
   311 {
   312     switch( reg ) {
   313     case PIRQ1:
   314 	break; /* Treat this as read-only for the moment */
   315     case PIRQ0:
   316 	val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
   317 	MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
   318 	asic_check_cleared_events();
   319 	break;
   320     case PIRQ2:
   321 	/* Clear any events */
   322 	val = MMIO_READ(ASIC, reg)&(~val);
   323 	MMIO_WRITE( ASIC, reg, val );
   324 	if( val == 0 ) { /* all clear - clear the cascade bit */
   325 	    MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
   326 	}
   327 	asic_check_cleared_events();
   328 	break;
   329     case SYSRESET:
   330 	if( val == 0x7611 ) {
   331 	    dreamcast_reset();
   332 	    sh4r.new_pc = sh4r.pc;
   333 	} else {
   334 	    WARN( "Unknown value %08X written to SYSRESET port", val );
   335 	}
   336 	break;
   337     case MAPLE_STATE:
   338 	MMIO_WRITE( ASIC, reg, val );
   339 	if( val & 1 ) {
   340 	    uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA) &0x1FFFFFE0;
   341 	    maple_handle_buffer( maple_addr );
   342 	    MMIO_WRITE( ASIC, reg, 0 );
   343 	}
   344 	break;
   345     case PVRDMACTL: /* Initiate PVR DMA transfer */
   346 	MMIO_WRITE( ASIC, reg, val );
   347 	if( val & 1 ) {
   348 	    uint32_t dest_addr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
   349 	    uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
   350 	    char *data = alloca( count );
   351 	    uint32_t rcount = DMAC_get_buffer( 2, data, count );
   352 	    if( rcount != count )
   353 		WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );
   354 	    mem_copy_to_sh4( dest_addr, data, rcount );
   355 	    asic_event( EVENT_PVR_DMA );
   356 	    MMIO_WRITE( ASIC, PVRDMACTL, 0 );
   357 	    MMIO_WRITE( ASIC, PVRDMACNT, 0 );
   358 	}
   359 	break;
   360     case PVRDMADEST: case PVRDMACNT: case MAPLE_DMA:
   361 	MMIO_WRITE( ASIC, reg, val );
   362 	break;
   363     default:
   364 	MMIO_WRITE( ASIC, reg, val );
   365     }
   366 }
   368 int32_t mmio_region_ASIC_read( uint32_t reg )
   369 {
   370     int32_t val;
   371     switch( reg ) {
   372         /*
   373         case 0x89C:
   374             sh4_stop();
   375             return 0x000000B;
   376         */     
   377     case PIRQ0:
   378     case PIRQ1:
   379     case PIRQ2:
   380     case IRQA0:
   381     case IRQA1:
   382     case IRQA2:
   383     case IRQB0:
   384     case IRQB1:
   385     case IRQB2:
   386     case IRQC0:
   387     case IRQC1:
   388     case IRQC2:
   389     case MAPLE_STATE:
   390 	val = MMIO_READ(ASIC, reg);
   391 	return val;            
   392     case G2STATUS:
   393 	return g2_read_status();
   394     default:
   395 	val = MMIO_READ(ASIC, reg);
   396 	return val;
   397     }
   399 }
/**
 * Write handler for the EXTDMA register block: the IDE taskfile registers
 * (routed to the GD-ROM IDE emulation), the IDE and G2 DMA control
 * registers (which trigger transfers on write), and the IDEACTIVATE
 * magic-value enable/disable sequence.
 */
MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
{
    /* Writes to IDE registers are ignored while the interface is disabled */
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
	return; /* disabled */
    }

    switch( reg ) {
    case IDEALTSTATUS: /* Device control */
	ide_write_control( val );
	break;
    case IDEDATA:
	ide_write_data_pio( val );
	break;
    /* Taskfile registers: writable only when the device permits it
     * (e.g. not while busy) */
    case IDEFEAT:
	if( ide_can_write_regs() )
	    idereg.feature = (uint8_t)val;
	break;
    case IDECOUNT:
	if( ide_can_write_regs() )
	    idereg.count = (uint8_t)val;
	break;
    case IDELBA0:
	if( ide_can_write_regs() )
	    idereg.lba0 = (uint8_t)val;
	break;
    case IDELBA1:
	if( ide_can_write_regs() )
	    idereg.lba1 = (uint8_t)val;
	break;
    case IDELBA2:
	if( ide_can_write_regs() )
	    idereg.lba2 = (uint8_t)val;
	break;
    case IDEDEV:
	if( ide_can_write_regs() )
	    idereg.device = (uint8_t)val;
	break;
    case IDECMD:
	/* NOP is always accepted, other commands only when regs writable */
	if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
	    ide_write_command( (uint8_t)val );
	}
	break;
    /* DMA control writes are stored, then the transfer is attempted
     * immediately (it only proceeds once both CTL registers read 1) */
    case IDEDMACTL1:
    case IDEDMACTL2:
	MMIO_WRITE( EXTDMA, reg, val );
	asic_ide_dma_transfer( );
	break;
    case IDEACTIVATE:
	if( val == 0x001FFFFF ) {
	    idereg.interface_enabled = TRUE;
	    /* Conventional wisdom says that this is necessary but not
	     * sufficient to enable the IDE interface.
	     */
	} else if( val == 0x000042FE ) {
	    idereg.interface_enabled = FALSE;
	}
	break;
    case G2DMA0CTL1:
    case G2DMA0CTL2:
	MMIO_WRITE( EXTDMA, reg, val );
	g2_dma_transfer( 0 );
	break;
    case G2DMA0STOP:
	break;
    case G2DMA1CTL1:
    case G2DMA1CTL2:
	MMIO_WRITE( EXTDMA, reg, val );
	g2_dma_transfer( 1 );
	break;

    case G2DMA1STOP:
	break;
    case G2DMA2CTL1:
    case G2DMA2CTL2:
	MMIO_WRITE( EXTDMA, reg, val );
	g2_dma_transfer( 2 );
	break;
    case G2DMA2STOP:
	break;
    case G2DMA3CTL1:
    case G2DMA3CTL2:
	MMIO_WRITE( EXTDMA, reg, val );
	g2_dma_transfer( 3 );
	break;
    case G2DMA3STOP:
	break;
    case PVRDMA2CTL1:
    case PVRDMA2CTL2:
	/* PVR DMA via EXTDMA is not implemented; only warn on nonzero */
	if( val != 0 ) {
	    ERROR( "Write to unimplemented DMA control register %08X", reg );
	    //dreamcast_stop();
	    //sh4_stop();
	}
	break;
    default:
            MMIO_WRITE( EXTDMA, reg, val );
    }
}
   500 MMIO_REGION_READ_FN( EXTDMA, reg )
   501 {
   502     uint32_t val;
   503     if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
   504 	return 0xFFFFFFFF; /* disabled */
   505     }
   507     switch( reg ) {
   508     case IDEALTSTATUS: 
   509 	val = idereg.status;
   510 	return val;
   511     case IDEDATA: return ide_read_data_pio( );
   512     case IDEFEAT: return idereg.error;
   513     case IDECOUNT:return idereg.count;
   514     case IDELBA0: return idereg.disc;
   515     case IDELBA1: return idereg.lba1;
   516     case IDELBA2: return idereg.lba2;
   517     case IDEDEV: return idereg.device;
   518     case IDECMD:
   519 	val = ide_read_status();
   520 	return val;
   521     default:
   522 	val = MMIO_READ( EXTDMA, reg );
   523 	return val;
   524     }
   525 }
.