Search
lxdream.org :: lxdream/src/asic.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/asic.c
changeset 753:1fe39c3a9bbc
prev 736:a02d1475ccfd
next 833:1ea87e0221f8
author nkeynes
date Wed Jul 30 03:00:40 2008 +0000 (11 years ago)
permissions -rw-r--r--
last change Add debian changelog file
view annotate diff log raw
     1 /**
     2  * $Id$
     3  *
     4  * Support for the miscellaneous ASIC functions (Primarily event multiplexing,
     5  * and DMA). 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #define MODULE asic_module
    22 #include <assert.h>
    23 #include <stdlib.h>
    24 #include "dream.h"
    25 #include "mem.h"
    26 #include "sh4/intc.h"
    27 #include "sh4/dmac.h"
    28 #include "sh4/sh4.h"
    29 #include "dreamcast.h"
    30 #include "maple/maple.h"
    31 #include "gdrom/ide.h"
    32 #include "pvr2/pvr2.h"
    33 #include "asic.h"
    34 #define MMIO_IMPL
    35 #include "asic.h"
    36 /*
    37  * Open questions:
    38  *   1) Does changing the mask after event occurance result in the
    39  *      interrupt being delivered immediately?
    40  * TODO: Logic diagram of ASIC event/interrupt logic.
    41  *
    42  * ... don't even get me started on the "EXTDMA" page, about which, apparently,
    43  * practically nothing is publicly known...
    44  */
/* Forward declarations for the module lifecycle hooks and the G2 FIFO
 * status updater (definitions below). */
static void asic_check_cleared_events( void );
static void asic_init( void );
static void asic_reset( void );
static uint32_t asic_run_slice( uint32_t nanosecs );
static void asic_save_state( FILE *f );
static int asic_load_state( FILE *f );
static uint32_t g2_update_fifo_status( uint32_t slice_cycle );

/* Module descriptor registered with the dreamcast core scheduler. */
struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
        NULL, asic_save_state, asic_load_state };

/* Approximate G2 FIFO status-bit durations, in nanoseconds. These drive the
 * timers scheduled by asic_g2_write_word(). */
#define G2_BIT5_TICKS 60
#define G2_BIT4_TICKS 160
#define G2_BIT0_ON_TICKS 120
#define G2_BIT0_OFF_TICKS 420

/* Deadlines (in ns relative to the current time slice) at which each G2STATUS
 * FIFO bit changes state. -1 means the timer is inactive. */
struct asic_g2_state {
    int bit5_off_timer;
    int bit4_on_timer;
    int bit4_off_timer;
    int bit0_on_timer;
    int bit0_off_timer;
};

static struct asic_g2_state g2_state;
    72 static uint32_t asic_run_slice( uint32_t nanosecs )
    73 {
    74     g2_update_fifo_status(nanosecs);
    75     if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
    76         g2_state.bit5_off_timer = -1;
    77     } else {
    78         g2_state.bit5_off_timer -= nanosecs;
    79     }
    81     if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
    82         g2_state.bit4_off_timer = -1;
    83     } else {
    84         g2_state.bit4_off_timer -= nanosecs;
    85     }
    86     if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
    87         g2_state.bit4_on_timer = -1;
    88     } else {
    89         g2_state.bit4_on_timer -= nanosecs;
    90     }
    92     if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
    93         g2_state.bit0_off_timer = -1;
    94     } else {
    95         g2_state.bit0_off_timer -= nanosecs;
    96     }
    97     if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
    98         g2_state.bit0_on_timer = -1;
    99     } else {
   100         g2_state.bit0_on_timer -= nanosecs;
   101     }
   103     return nanosecs;
   104 }
/**
 * Module init hook: register the ASIC and EXTDMA MMIO regions with the memory
 * subsystem, then reset the internal G2 timer state.
 */
static void asic_init( void )
{
    register_io_region( &mmio_region_ASIC );
    register_io_region( &mmio_region_EXTDMA );
    asic_reset();
}
   113 static void asic_reset( void )
   114 {
   115     memset( &g2_state, 0xFF, sizeof(g2_state) );
   116 }    
/**
 * Save-state hook: serialize the raw G2 timer struct to the state file.
 * NOTE(review): the fwrite return value is unchecked; a short write would
 * silently produce a truncated save state.
 */
static void asic_save_state( FILE *f )
{
    fwrite( &g2_state, sizeof(g2_state), 1, f );
}
   123 static int asic_load_state( FILE *f )
   124 {
   125     if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
   126         return 1;
   127     else
   128         return 0;
   129 }
/**
 * Setup the timers for the 3 FIFO status bits following a write through the G2
 * bus from the SH4 side. The timing is roughly as follows: (times are
 * approximate based on software readings - I wouldn't take this as gospel but
 * it seems to be enough to fool most programs).
 *    0ns: Bit 5 (Input fifo?) goes high immediately on the write
 *   40ns: Bit 5 goes low and bit 4 goes high
 *  120ns: Bit 4 goes low, bit 0 goes high
 *  240ns: Bit 0 goes low.
 *
 * Additional writes while the FIFO is in operation extend the time that the
 * bits remain high as one might expect, without altering the time at which
 * they initially go high.
 */
void asic_g2_write_word()
{
    /* Bit 5 off-timer: start fresh if already expired, otherwise extend the
     * pending deadline. */
    if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    } else {
        g2_state.bit5_off_timer += G2_BIT5_TICKS;
    }

    /* Bit 4 goes high when bit 5 goes low; only schedule the on-timer if one
     * is not already pending (the on-time is never pushed back).
     * NOTE(review): uses G2_BIT5_TICKS as the delay here - presumably
     * intentional since bit 4 rises exactly when bit 5 falls. */
    if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    }

    /* Bit 4 off-timer: anchored to the on-time if expired, else extended. */
    if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
    } else {
        g2_state.bit4_off_timer += G2_BIT4_TICKS;
    }

    /* Bit 0: same scheme as bit 4, with its own delays. */
    if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
    }

    if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
    } else {
        g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
    }

    /* Bit 5 (0x20) goes high immediately on the write. */
    MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
}
   177 static uint32_t g2_update_fifo_status( uint32_t nanos )
   178 {
   179     uint32_t val = MMIO_READ( ASIC, G2STATUS );
   180     if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
   181         val = val & (~0x20);
   182         g2_state.bit5_off_timer = -1;
   183     }
   184     if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
   185         val = val | 0x10;
   186         g2_state.bit4_on_timer = -1;
   187     }
   188     if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
   189         val = val & (~0x10);
   190         g2_state.bit4_off_timer = -1;
   191     } 
   193     if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
   194         val = val | 0x01;
   195         g2_state.bit0_on_timer = -1;
   196     }
   197     if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
   198         val = val & (~0x01);
   199         g2_state.bit0_off_timer = -1;
   200     } 
   202     MMIO_WRITE( ASIC, G2STATUS, val );
   203     return val;
   204 }   
/* Read handler for G2STATUS: refresh the FIFO bits as of the current
 * slice cycle before returning them. */
static int g2_read_status() {
    return g2_update_fifo_status( sh4r.slice_cycle );
}
/**
 * Raise an ASIC event: set its bit in the appropriate PIRQ word, then raise
 * each interrupt line (A=IRQ13, B=IRQ11, C=IRQ9) whose mask register enables
 * any pending bit in that word. Events in the second and third words also
 * raise the corresponding cascade event in word 0 (recursively).
 * @param event event number; bits 5-6 select the PIRQ word, bits 0-4 the bit.
 */
void asic_event( int event )
{
    int offset = ((event&0x60)>>3); /* word index * 4 = register byte offset */
    /* NOTE(review): |= applied to MMIO_READ - assumes the macro expands to an
     * lvalue so the read-modify-write lands in the register; confirm against
     * the MMIO_READ definition. */
    int result = (MMIO_READ(ASIC, PIRQ0 + offset))  |=  (1<<(event&0x1F));

    if( result & MMIO_READ(ASIC, IRQA0 + offset) )
        intc_raise_interrupt( INT_IRQ13 );
    if( result & MMIO_READ(ASIC, IRQB0 + offset) )
        intc_raise_interrupt( INT_IRQ11 );
    if( result & MMIO_READ(ASIC, IRQC0 + offset) )
        intc_raise_interrupt( INT_IRQ9 );

    if( event >= 64 ) { /* Third word */
        asic_event( EVENT_CASCADE2 );
    } else if( event >= 32 ) { /* Second word */
        asic_event( EVENT_CASCADE1 );
    }
}
/**
 * Clear an ASIC event bit. If its PIRQ word becomes completely clear, the
 * matching cascade bit in PIRQ0 (bit 31 for word 2, bit 30 for word 1) is
 * cleared too, and the interrupt lines are re-evaluated so any line with no
 * remaining enabled events is dropped.
 */
void asic_clear_event( int event ) {
    int offset = ((event&0x60)>>3);
    uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset)  & (~(1<<(event&0x1F)));
    MMIO_WRITE( ASIC, PIRQ0 + offset, result );
    if( result == 0 ) {
        /* clear cascades if necessary */
        if( event >= 64 ) {
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        } else if( event >= 32 ) {
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
        }
    }

    asic_check_cleared_events();
}
   246 void asic_check_cleared_events( )
   247 {
   248     int i, setA = 0, setB = 0, setC = 0;
   249     uint32_t bits;
   250     for( i=0; i<12; i+=4 ) {
   251         bits = MMIO_READ( ASIC, PIRQ0 + i );
   252         setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   253         setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   254         setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   255     }
   256     if( setA == 0 )
   257         intc_clear_interrupt( INT_IRQ13 );
   258     if( setB == 0 )
   259         intc_clear_interrupt( INT_IRQ11 );
   260     if( setC == 0 )
   261         intc_clear_interrupt( INT_IRQ9 );
   262 }
   264 void asic_event_mask_changed( )
   265 {
   266     int i, setA = 0, setB = 0, setC = 0;
   267     uint32_t bits;
   268     for( i=0; i<12; i+=4 ) {
   269         bits = MMIO_READ( ASIC, PIRQ0 + i );
   270         setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   271         setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   272         setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   273     }
   274     if( setA == 0 ) 
   275         intc_clear_interrupt( INT_IRQ13 );
   276     else
   277         intc_raise_interrupt( INT_IRQ13 );
   278     if( setB == 0 )
   279         intc_clear_interrupt( INT_IRQ11 );
   280     else
   281         intc_raise_interrupt( INT_IRQ11 );
   282     if( setC == 0 )
   283         intc_clear_interrupt( INT_IRQ9 );
   284     else
   285         intc_raise_interrupt( INT_IRQ9 );
   286 }
   288 void g2_dma_transfer( int channel )
   289 {
   290     uint32_t offset = channel << 5;
   292     if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
   293         if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
   294             uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
   295             uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
   296             uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
   297             uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
   298             // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
   299             unsigned char buf[length];
   300             if( dir == 0 ) { /* SH4 to device */
   301                 mem_copy_from_sh4( buf, sh4addr, length );
   302                 mem_copy_to_sh4( extaddr, buf, length );
   303             } else { /* Device to SH4 */
   304                 mem_copy_from_sh4( buf, extaddr, length );
   305                 mem_copy_to_sh4( sh4addr, buf, length );
   306             }
   307             MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   308             asic_event( EVENT_G2_DMA0 + channel );
   309         } else {
   310             MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   311         }
   312     }
   313 }
   315 void asic_ide_dma_transfer( )
   316 {	
   317     if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
   318         if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
   319             MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );
   321             uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
   322             uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
   323             // int dir = MMIO_READ( EXTDMA, IDEDMADIR );
   325             uint32_t xfer = ide_read_data_dma( addr, length );
   326             MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
   327             MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   328         } else { /* 0 */
   329             MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   330         }
   331     }
   332 }
   334 void pvr_dma_transfer( )
   335 {
   336     sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
   337     uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
   338     unsigned char *data = alloca( count );
   339     uint32_t rcount = DMAC_get_buffer( 2, data, count );
   340     if( rcount != count )
   341         WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );
   343     pvr2_dma_write( destaddr, data, rcount );
   345     MMIO_WRITE( ASIC, PVRDMACTL, 0 );
   346     MMIO_WRITE( ASIC, PVRDMACNT, 0 );
   347     if( destaddr & 0x01000000 ) { /* Write to texture RAM */
   348         MMIO_WRITE( ASIC, PVRDMADEST, destaddr + rcount );
   349     }
   350     asic_event( EVENT_PVR_DMA );
   351 }
   353 void sort_dma_transfer( )
   354 {
   355     sh4addr_t table_addr = MMIO_READ( ASIC, SORTDMATBL );
   356     sh4addr_t data_addr = MMIO_READ( ASIC, SORTDMADATA );
   357     int table_size = MMIO_READ( ASIC, SORTDMATSIZ );
   358     int addr_shift = MMIO_READ( ASIC, SORTDMAASIZ ) ? 5 : 0;
   359     int count = 1;
   361     uint32_t *table32 = (uint32_t *)mem_get_region( table_addr );
   362     uint16_t *table16 = (uint16_t *)table32;
   363     uint32_t next = table_size ? (*table32++) : (uint32_t)(*table16++);
   364     while(1) {
   365         next &= 0x07FFFFFF;
   366         if( next == 1 ) {
   367             next = table_size ? (*table32++) : (uint32_t)(*table16++);
   368             count++;
   369             continue;
   370         } else if( next == 2 ) {
   371             asic_event( EVENT_SORT_DMA );
   372             break;
   373         } 
   374         uint32_t *data = (uint32_t *)mem_get_region(data_addr + (next<<addr_shift));
   375         if( data == NULL ) {
   376             break;
   377         }
   379         uint32_t *poly = pvr2_ta_find_polygon_context(data, 128);
   380         if( poly == NULL ) {
   381             asic_event( EVENT_SORT_DMA_ERR );
   382             break;
   383         }
   384         uint32_t size = poly[6] & 0xFF;
   385         if( size == 0 ) {
   386             size = 0x100;
   387         }
   388         next = poly[7];
   389         pvr2_ta_write( (unsigned char *)data, size<<5 );
   390     }
   392     MMIO_WRITE( ASIC, SORTDMACNT, count );
   393     MMIO_WRITE( ASIC, SORTDMACTL, 0 );
   394 }
/**
 * Write handler for the ASIC register region. Event-pending registers are
 * write-one-to-clear; mask writes re-evaluate the interrupt lines; the DMA
 * control registers kick off their transfers when written with 1.
 */
void mmio_region_ASIC_write( uint32_t reg, uint32_t val )
{
    switch( reg ) {
    case PIRQ1:
        break; /* Treat this as read-only for the moment */
    case PIRQ0:
        /* Write-one-to-clear semantics for pending events */
        val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
        MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
        asic_check_cleared_events();
        break;
    case PIRQ2:
        /* Clear any events */
        val = MMIO_READ(ASIC, reg)&(~val);
        MMIO_WRITE( ASIC, reg, val );
        if( val == 0 ) { /* all clear - clear the cascade bit */
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        }
        asic_check_cleared_events();
        break;
    case IRQA0:
    case IRQA1:
    case IRQA2:
    case IRQB0:
    case IRQB1:
    case IRQB2:
    case IRQC0:
    case IRQC1:
    case IRQC2:
        /* Mask register write: re-evaluate all three interrupt lines */
        MMIO_WRITE( ASIC, reg, val );
        asic_event_mask_changed();
        break;
    case SYSRESET:
        /* Magic value 0x7611 triggers a full system reset */
        if( val == 0x7611 ) {
            dreamcast_reset();
        } else {
            WARN( "Unknown value %08X written to SYSRESET port", val );
        }
        break;
    case MAPLE_STATE:
        MMIO_WRITE( ASIC, reg, val );
        if( val & 1 ) {
            /* Bit 0 starts processing of the maple DMA buffer */
            uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA) &0x1FFFFFE0;
            maple_handle_buffer( maple_addr );
            MMIO_WRITE( ASIC, reg, 0 );
        }
        break;
    case PVRDMADEST:
        /* Destination is masked and forced into the 0x10000000 region */
        MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
        break;
    case PVRDMACNT:
        MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
        break;
    case PVRDMACTL: /* Initiate PVR DMA transfer */
        val = val & 0x01;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            pvr_dma_transfer();
        }
        break;
    case SORTDMATBL: case SORTDMADATA:
        /* Addresses are masked and forced into the 0x08000000 region */
        MMIO_WRITE( ASIC, reg, (val & 0x0FFFFFE0) | 0x08000000 );
        break;
    case SORTDMATSIZ: case SORTDMAASIZ:
        MMIO_WRITE( ASIC, reg, (val & 1) );
        break;
    case SORTDMACTL:
        /* Writing 1 starts the SORT DMA engine */
        val = val & 1;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            sort_dma_transfer();
        }
        break;
    case MAPLE_DMA:
        MMIO_WRITE( ASIC, reg, val );
        break;
    default:
        MMIO_WRITE( ASIC, reg, val );
    }
}
   476 int32_t mmio_region_ASIC_read( uint32_t reg )
   477 {
   478     int32_t val;
   479     switch( reg ) {
   480     case PIRQ0:
   481     case PIRQ1:
   482     case PIRQ2:
   483     case IRQA0:
   484     case IRQA1:
   485     case IRQA2:
   486     case IRQB0:
   487     case IRQB1:
   488     case IRQB2:
   489     case IRQC0:
   490     case IRQC1:
   491     case IRQC2:
   492     case MAPLE_STATE:
   493         val = MMIO_READ(ASIC, reg);
   494         return val;            
   495     case G2STATUS:
   496         return g2_read_status();
   497     default:
   498         val = MMIO_READ(ASIC, reg);
   499         return val;
   500     }
   502 }
/**
 * Write handler for the EXTDMA register region: IDE registers (gated on the
 * interface being enabled), the IDE DMA engine, the four G2 DMA channels, and
 * the (unimplemented) PVR DMA2 channel.
 */
MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
{
    /* Writes to the IDE bank are dropped while the interface is disabled */
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return; /* disabled */
    }

    switch( reg ) {
    case IDEALTSTATUS: /* Device control */
        ide_write_control( val );
        break;
    case IDEDATA:
        ide_write_data_pio( val );
        break;
    /* The IDE task-file registers may only be written when the device
     * accepts register writes (ide_can_write_regs) */
    case IDEFEAT:
        if( ide_can_write_regs() )
            idereg.feature = (uint8_t)val;
        break;
    case IDECOUNT:
        if( ide_can_write_regs() )
            idereg.count = (uint8_t)val;
        break;
    case IDELBA0:
        if( ide_can_write_regs() )
            idereg.lba0 = (uint8_t)val;
        break;
    case IDELBA1:
        if( ide_can_write_regs() )
            idereg.lba1 = (uint8_t)val;
        break;
    case IDELBA2:
        if( ide_can_write_regs() )
            idereg.lba2 = (uint8_t)val;
        break;
    case IDEDEV:
        if( ide_can_write_regs() )
            idereg.device = (uint8_t)val;
        break;
    case IDECMD:
        /* NOP is accepted even when other register writes are blocked */
        if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
            ide_write_command( (uint8_t)val );
        }
        break;
    case IDEDMASH4:
        MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 );
        break;
    case IDEDMASIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
        break;
    case IDEDMADIR:
        MMIO_WRITE( EXTDMA, reg, val & 1 );
        break;
    case IDEDMACTL1:
    case IDEDMACTL2:
        /* Writing either control bit re-evaluates the IDE DMA engine */
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        asic_ide_dma_transfer( );
        break;
    case IDEACTIVATE:
        /* Magic enable/disable values for the IDE interface */
        if( val == 0x001FFFFF ) {
            idereg.interface_enabled = TRUE;
            /* Conventional wisdom says that this is necessary but not
             * sufficient to enable the IDE interface.
             */
        } else if( val == 0x000042FE ) {
            idereg.interface_enabled = FALSE;
        }
        break;
    case G2DMA0EXT: case G2DMA0SH4: case G2DMA0SIZ:
    case G2DMA1EXT: case G2DMA1SH4: case G2DMA1SIZ:
    case G2DMA2EXT: case G2DMA2SH4: case G2DMA2SIZ:
    case G2DMA3EXT: case G2DMA3SH4: case G2DMA3SIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x9FFFFFE0 );
        break;
    case G2DMA0MOD: case G2DMA1MOD: case G2DMA2MOD: case G2DMA3MOD:
        MMIO_WRITE( EXTDMA, reg, val & 0x07 );
        break;
    case G2DMA0DIR: case G2DMA1DIR: case G2DMA2DIR: case G2DMA3DIR:
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        break;
    /* G2 channel control writes store the bit then re-evaluate the channel */
    case G2DMA0CTL1:
    case G2DMA0CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1);
        g2_dma_transfer( 0 );
        break;
    case G2DMA0STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA1CTL1:
    case G2DMA1CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1);
        g2_dma_transfer( 1 );
        break;

    case G2DMA1STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA2CTL1:
    case G2DMA2CTL2:
        MMIO_WRITE( EXTDMA, reg, val &1 );
        g2_dma_transfer( 2 );
        break;
    case G2DMA2STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA3CTL1:
    case G2DMA3CTL2:
        MMIO_WRITE( EXTDMA, reg, val &1 );
        g2_dma_transfer( 3 );
        break;
    case G2DMA3STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case PVRDMA2CTL1:
    case PVRDMA2CTL2:
        if( val != 0 ) {
            ERROR( "Write to unimplemented DMA control register %08X", reg );
        }
        break;
    default:
        MMIO_WRITE( EXTDMA, reg, val );
    }
}
   626 MMIO_REGION_READ_FN( EXTDMA, reg )
   627 {
   628     uint32_t val;
   629     if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
   630         return 0xFFFFFFFF; /* disabled */
   631     }
   633     switch( reg ) {
   634     case IDEALTSTATUS: 
   635         val = idereg.status;
   636         return val;
   637     case IDEDATA: return ide_read_data_pio( );
   638     case IDEFEAT: return idereg.error;
   639     case IDECOUNT:return idereg.count;
   640     case IDELBA0: return ide_get_drive_status();
   641     case IDELBA1: return idereg.lba1;
   642     case IDELBA2: return idereg.lba2;
   643     case IDEDEV: return idereg.device;
   644     case IDECMD:
   645         val = ide_read_status();
   646         return val;
   647     default:
   648         val = MMIO_READ( EXTDMA, reg );
   649         return val;
   650     }
   651 }
.