lxdream.org :: lxdream/src/asic.c
filename src/asic.c
changeset 1237:377077d10d62
prev 1100:50e702af9373
next 1269:50c63f63bf8f
author nkeynes
date Sun Mar 04 21:10:12 2012 +1000
permissions -rw-r--r--
last change Move glsl loading into common gl code, and set a display capability flag
     1 /**
     2  * $Id$
     3  *
     4  * Support for the miscellaneous ASIC functions (primarily event multiplexing
     5  * and DMA).
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #define MODULE asic_module
    22 #include <assert.h>
    23 #include <stdlib.h>
    24 #include "eventq.h"
    25 #include "dream.h"
    26 #include "mem.h"
    27 #include "sh4/intc.h"
    28 #include "sh4/dmac.h"
    29 #include "sh4/sh4.h"
    30 #include "dreamcast.h"
    31 #include "maple/maple.h"
    32 #include "gdrom/ide.h"
    33 #include "pvr2/pvr2.h"
    34 #include "asic.h"
    35 #define MMIO_IMPL
    36 #include "asic.h"
    37 /*
    38  * Open questions:
    39  *   1) Does changing the mask after event occurrence result in the
    40  *      interrupt being delivered immediately?
    41  * TODO: Logic diagram of ASIC event/interrupt logic.
    42  *
    43  * ... don't even get me started on the "EXTDMA" page, about which, apparently,
    44  * practically nothing is publicly known...
    45  */
    47 static void asic_check_cleared_events( void );
    48 static void asic_init( void );
    49 static void asic_reset( void );
    50 static uint32_t asic_run_slice( uint32_t nanosecs );
    51 static void asic_save_state( FILE *f );
    52 static int asic_load_state( FILE *f );
    53 static uint32_t g2_update_fifo_status( uint32_t slice_cycle );
    55 struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
    56         NULL, asic_save_state, asic_load_state };
    58 #define G2_BIT5_TICKS 60
    59 #define G2_BIT4_TICKS 160
    60 #define G2_BIT0_ON_TICKS 120
    61 #define G2_BIT0_OFF_TICKS 420
    63 struct asic_g2_state {
    64     int bit5_off_timer;
    65     int bit4_on_timer;
    66     int bit4_off_timer;
    67     int bit0_on_timer;
    68     int bit0_off_timer;
    69 };
    71 static struct asic_g2_state g2_state;
    73 static uint32_t asic_run_slice( uint32_t nanosecs )
    74 {
    75     g2_update_fifo_status(nanosecs);
    76     if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
    77         g2_state.bit5_off_timer = -1;
    78     } else {
    79         g2_state.bit5_off_timer -= nanosecs;
    80     }
    82     if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
    83         g2_state.bit4_off_timer = -1;
    84     } else {
    85         g2_state.bit4_off_timer -= nanosecs;
    86     }
    87     if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
    88         g2_state.bit4_on_timer = -1;
    89     } else {
    90         g2_state.bit4_on_timer -= nanosecs;
    91     }
    93     if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
    94         g2_state.bit0_off_timer = -1;
    95     } else {
    96         g2_state.bit0_off_timer -= nanosecs;
    97     }
    98     if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
    99         g2_state.bit0_on_timer = -1;
   100     } else {
   101         g2_state.bit0_on_timer -= nanosecs;
   102     }
   104     return nanosecs;
   105 }
   107 static void asic_init( void )
   108 {
   109     register_io_region( &mmio_region_ASIC );
   110     register_io_region( &mmio_region_EXTDMA );
   111     asic_reset();
   112 }
   114 static void asic_reset( void )
   115 {
   116     memset( &g2_state, 0xFF, sizeof(g2_state) );
   117 }    
   119 static void asic_save_state( FILE *f )
   120 {
   121     fwrite( &g2_state, sizeof(g2_state), 1, f );
   122 }
   124 static int asic_load_state( FILE *f )
   125 {
   126     if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
   127         return 1;
   128     else
   129         return 0;
   130 }
   133 /**
   134  * Setup the timers for the 3 FIFO status bits following a write through the G2
   135  * bus from the SH4 side. The timing is roughly as follows: (times are
   136  * approximate based on software readings - I wouldn't take this as gospel but
   137  * it seems to be enough to fool most programs). 
   138  *    0ns: Bit 5 (Input fifo?) goes high immediately on the write
   139  *   40ns: Bit 5 goes low and bit 4 goes high
   140  *  120ns: Bit 4 goes low, bit 0 goes high
   141  *  240ns: Bit 0 goes low.
   142  *
   143  * Additional writes while the FIFO is in operation extend the time that the
   144  * bits remain high as one might expect, without altering the time at which
   145  * they initially go high.
   146  */
   147 void asic_g2_write_word()
   148 {
   149     if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
   150         g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
   151     } else {
   152         g2_state.bit5_off_timer += G2_BIT5_TICKS;
   153     }
   155     if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
   156         g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
   157     }
   159     if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
   160         g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
   161     } else {
   162         g2_state.bit4_off_timer += G2_BIT4_TICKS;
   163     }
   165     if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
   166         g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
   167     }
   169     if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
   170         g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
   171     } else {
   172         g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
   173     }
   175     MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
   176 }
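        /**
         * Recompute the G2STATUS FIFO bits from the timers set up by
         * asic_g2_write_word(): bit 5 is cleared, and bits 4 and 0 are raised and
         * then cleared, as each timer expires (expired timers are reset to -1).
         */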
   178 static uint32_t g2_update_fifo_status( uint32_t nanos )
   179 {
   180     uint32_t val = MMIO_READ( ASIC, G2STATUS );
   181     if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
   182         val = val & (~0x20);
   183         g2_state.bit5_off_timer = -1;
   184     }
   185     if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
   186         val = val | 0x10;
   187         g2_state.bit4_on_timer = -1;
   188     }
   189     if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
   190         val = val & (~0x10);
   191         g2_state.bit4_off_timer = -1;
   192     } 
   194     if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
   195         val = val | 0x01;
   196         g2_state.bit0_on_timer = -1;
   197     }
   198     if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
   199         val = val & (~0x01);
   200         g2_state.bit0_off_timer = -1;
   201     } 
   203     MMIO_WRITE( ASIC, G2STATUS, val );
   204     return val;
   205 }   
   207 static int g2_read_status() {
   208     return g2_update_fifo_status( sh4r.slice_cycle );
   209 }
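        /**
         * Raise an ASIC event. The event number selects one of the three 32-bit
         * pending words (PIRQ0-2) and a bit within it; the bit is set, and the SH4
         * interrupts selected by the corresponding IRQA/B/C mask registers are
         * raised (IRQ13, IRQ11 and IRQ9 respectively). Events in the second and
         * third words also raise the matching cascade event in the first word.
         */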
   212 void asic_event( int event )
   213 {
   214     int offset = ((event&0x60)>>3);
   215     int result = (MMIO_READ(ASIC, PIRQ0 + offset))  |=  (1<<(event&0x1F));
   217     if( result & MMIO_READ(ASIC, IRQA0 + offset) )
   218         intc_raise_interrupt( INT_IRQ13 );
   219     if( result & MMIO_READ(ASIC, IRQB0 + offset) )
   220         intc_raise_interrupt( INT_IRQ11 );
   221     if( result & MMIO_READ(ASIC, IRQC0 + offset) )
   222         intc_raise_interrupt( INT_IRQ9 );
   224     if( event >= 64 ) { /* Third word */
   225         asic_event( EVENT_CASCADE2 );
   226     } else if( event >= 32 ) { /* Second word */
   227         asic_event( EVENT_CASCADE1 );
   228     }
   229 }
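        /**
         * Clear a pending ASIC event. If this leaves the event's pending word
         * empty, the corresponding cascade bit in PIRQ0 is cleared too, and the
         * interrupt lines are re-evaluated.
         */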
   231 void asic_clear_event( int event ) {
   232     int offset = ((event&0x60)>>3);
   233     uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset)  & (~(1<<(event&0x1F)));
   234     MMIO_WRITE( ASIC, PIRQ0 + offset, result );
   235     if( result == 0 ) {
   236         /* clear cascades if necessary */
   237         if( event >= 64 ) {
   238             MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
   239         } else if( event >= 32 ) {
   240             MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
   241         }
   242     }
   244     asic_check_cleared_events();
   245 }
   247 void asic_check_cleared_events( )
   248 {
   249     int i, setA = 0, setB = 0, setC = 0;
   250     uint32_t bits;
   251     for( i=0; i<12; i+=4 ) {
   252         bits = MMIO_READ( ASIC, PIRQ0 + i );
   253         setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   254         setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   255         setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   256     }
   257     if( setA == 0 )
   258         intc_clear_interrupt( INT_IRQ13 );
   259     if( setB == 0 )
   260         intc_clear_interrupt( INT_IRQ11 );
   261     if( setC == 0 )
   262         intc_clear_interrupt( INT_IRQ9 );
   263 }
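        /**
         * Called when any of the IRQA/B/C mask registers are written: re-evaluate
         * all three interrupt lines against the currently pending events, raising
         * or clearing each line accordingly. In this implementation, unmasking an
         * already-pending event therefore delivers the interrupt immediately (see
         * the open question at the top of the file).
         */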
   265 void asic_event_mask_changed( )
   266 {
   267     int i, setA = 0, setB = 0, setC = 0;
   268     uint32_t bits;
   269     for( i=0; i<12; i+=4 ) {
   270         bits = MMIO_READ( ASIC, PIRQ0 + i );
   271         setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   272         setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   273         setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   274     }
   275     if( setA == 0 ) 
   276         intc_clear_interrupt( INT_IRQ13 );
   277     else
   278         intc_raise_interrupt( INT_IRQ13 );
   279     if( setB == 0 )
   280         intc_clear_interrupt( INT_IRQ11 );
   281     else
   282         intc_raise_interrupt( INT_IRQ11 );
   283     if( setC == 0 )
   284         intc_clear_interrupt( INT_IRQ9 );
   285     else
   286         intc_raise_interrupt( INT_IRQ9 );
   287 }
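        /**
         * Execute a G2 DMA transfer for the given channel (0-3), invoked whenever
         * the channel's CTL registers are written. CTL1 appears to act as the
         * channel enable and CTL2 as the transfer trigger: when both are 1 the
         * block is copied between the external and SH4 addresses (direction per
         * G2DMAxDIR), CTL2 is cleared and the channel's completion event raised.
         * The mode register is currently ignored.
         */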
   289 void g2_dma_transfer( int channel )
   290 {
   291     uint32_t offset = channel << 5;
   293     if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
   294         if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
   295             uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
   296             uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
   297             uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
   298             uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
   299             // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
   300             unsigned char buf[length];
   301             if( dir == 0 ) { /* SH4 to device */
   302                 mem_copy_from_sh4( buf, sh4addr, length );
   303                 mem_copy_to_sh4( extaddr, buf, length );
   304             } else { /* Device to SH4 */
   305                 mem_copy_from_sh4( buf, extaddr, length );
   306                 mem_copy_to_sh4( sh4addr, buf, length );
   307             }
   308             MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   309             asic_event( EVENT_G2_DMA0 + channel );
   310         } else {
   311             MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   312         }
   313     }
   314 }
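        /**
         * Execute an IDE DMA transfer when both IDEDMACTL1 and IDEDMACTL2 are set.
         * Only the device-to-SH4 direction is handled here; the actual number of
         * bytes transferred is recorded in IDEDMATXSIZ, CTL2 is cleared and
         * EVENT_IDE_DMA raised on completion.
         */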
   316 void asic_ide_dma_transfer( )
   317 {	
   318     if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
   319         if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
   320             MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );
   322             uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
   323             uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
   324             // int dir = MMIO_READ( EXTDMA, IDEDMADIR );
   326             uint32_t xfer = ide_read_data_dma( addr, length );
   327             MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
   328             MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   329             asic_event( EVENT_IDE_DMA );            
   330         } else { /* 0 */
   331             MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   332         }
   333     }
   334 }
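        /**
         * Execute a PVR DMA transfer: pull the requested number of bytes from SH4
         * DMAC channel 2 and write them to the PVR2 at PVRDMADEST. On completion
         * the control and count registers are cleared, the destination is advanced
         * when writing to texture RAM, and EVENT_PVR_DMA is raised.
         */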
   336 void pvr_dma_transfer( )
   337 {
   338     sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
   339     uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
   340     unsigned char *data = alloca( count );
   341     uint32_t rcount = DMAC_get_buffer( 2, data, count );
   342     if( rcount != count )
   343         WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );
   345     pvr2_dma_write( destaddr, data, rcount );
   347     MMIO_WRITE( ASIC, PVRDMACTL, 0 );
   348     MMIO_WRITE( ASIC, PVRDMACNT, 0 );
   349     if( destaddr & 0x01000000 ) { /* Write to texture RAM */
   350         MMIO_WRITE( ASIC, PVRDMADEST, destaddr + rcount );
   351     }
   352     asic_event( EVENT_PVR_DMA );
   353 }
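        /**
         * Execute a PVR DMA2 transfer through the EXTDMA register block. This is
         * handled much like a G2 channel, with CTL1 as enable and CTL2 as trigger
         * (a best guess at the hardware semantics).
         */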
   355 void pvr_dma2_transfer()
   356 {
   357     if( MMIO_READ( EXTDMA, PVRDMA2CTL2 ) == 1 ) {
   358         if( MMIO_READ( EXTDMA, PVRDMA2CTL1 ) == 1 ) {
   359             sh4addr_t extaddr = MMIO_READ( EXTDMA, PVRDMA2EXT );
   360             sh4addr_t sh4addr = MMIO_READ( EXTDMA, PVRDMA2SH4 );
   361             int dir = MMIO_READ( EXTDMA, PVRDMA2DIR );
   362             uint32_t length = MMIO_READ( EXTDMA, PVRDMA2SIZ );
   363             unsigned char buf[length];
   364             if( dir == 0 ) { /* SH4 to PVR */
   365                 mem_copy_from_sh4( buf, sh4addr, length );
   366                 mem_copy_to_sh4( extaddr, buf, length );
   367             } else { /* PVR to SH4 */
   368                 mem_copy_from_sh4( buf, extaddr, length );
   369                 mem_copy_to_sh4( sh4addr, buf, length );
   370             }
   371             MMIO_WRITE( EXTDMA, PVRDMA2CTL2, 0 );
   372             asic_event( EVENT_PVR_DMA2 );
   373         }
   374     }
   375 }
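        /**
         * Execute a Sort-DMA transfer. This walks a link table in SH4 memory
         * (16- or 32-bit entries depending on SORTDMATSIZ): a link value of 1 moves
         * on to the next table entry, 2 ends the transfer, and anything else is an
         * offset (scaled by 32 if SORTDMAASIZ is set) into the data area at
         * SORTDMADATA. Each linked block is submitted to the tile accelerator,
         * taking its size and next link from the block's polygon context. The
         * number of table entries consumed is written back to SORTDMACNT and
         * SORTDMACTL is cleared. This interpretation is based on observed
         * behaviour rather than documentation.
         */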
   377 void sort_dma_transfer( )
   378 {
   379     sh4addr_t table_addr = MMIO_READ( ASIC, SORTDMATBL );
   380     sh4addr_t data_addr = MMIO_READ( ASIC, SORTDMADATA );
   381     int table_size = MMIO_READ( ASIC, SORTDMATSIZ );
   382     int addr_shift = MMIO_READ( ASIC, SORTDMAASIZ ) ? 5 : 0;
   383     int count = 1;
   385     uint32_t *table32 = (uint32_t *)mem_get_region( table_addr );
   386     uint16_t *table16 = (uint16_t *)table32;
   387     uint32_t next = table_size ? (*table32++) : (uint32_t)(*table16++);
   388     while(1) {
   389         next &= 0x07FFFFFF;
   390         if( next == 1 ) {
   391             next = table_size ? (*table32++) : (uint32_t)(*table16++);
   392             count++;
   393             continue;
   394         } else if( next == 2 ) {
   395             asic_event( EVENT_SORT_DMA );
   396             break;
   397         } 
   398         uint32_t *data = (uint32_t *)mem_get_region(data_addr + (next<<addr_shift));
   399         if( data == NULL ) {
   400             break;
   401         }
   403         uint32_t *poly = pvr2_ta_find_polygon_context(data, 128);
   404         if( poly == NULL ) {
   405             asic_event( EVENT_SORT_DMA_ERR );
   406             break;
   407         }
   408         uint32_t size = poly[6] & 0xFF;
   409         if( size == 0 ) {
   410             size = 0x100;
   411         }
   412         next = poly[7];
   413         pvr2_ta_write( (unsigned char *)data, size<<5 );
   414     }
   416     MMIO_WRITE( ASIC, SORTDMACNT, count );
   417     MMIO_WRITE( ASIC, SORTDMACTL, 0 );
   418 }
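        /**
         * Handle a write to MAPLE_STATE. Bit 0 indicates a DMA transfer in
         * progress: a 0->1 transition kicks off processing of the Maple DMA buffer
         * at MAPLE_DMA, while a 1->0 transition cancels the pending completion
         * event.
         */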
   420 void maple_set_dma_state( uint32_t val )
   421 {
   422     gboolean in_transfer = MMIO_READ( ASIC, MAPLE_STATE ) & 1;
   423     gboolean transfer_requested = val & 1;
   424     if( !in_transfer && transfer_requested ) {
   425         /* Initiate new DMA transfer */
   426         uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA) &0x1FFFFFE0;
   427         maple_handle_buffer( maple_addr );
   428     }
   429     else if ( in_transfer && !transfer_requested ) {
   430         /* Cancel current DMA transfer */
   431         event_cancel( EVENT_MAPLE_DMA );
   432     }
   433     MMIO_WRITE( ASIC, MAPLE_STATE, val );
   434 }
   436 gboolean asic_enable_ide_interface( gboolean enable )
   437 {
   438     gboolean oldval = idereg.interface_enabled;
   439     idereg.interface_enabled = enable;
   440     return oldval;
   441 }
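        /**
         * ASIC register read handler. Most registers simply return the stored
         * value; G2STATUS is special-cased so that the FIFO status bits are
         * refreshed from their timers at the time of the read.
         */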
   443 MMIO_REGION_READ_FN( ASIC, reg )
   444 {
   445     int32_t val;
   446     reg &= 0xFFF;
   447     switch( reg ) {
   448     case PIRQ0:
   449     case PIRQ1:
   450     case PIRQ2:
   451     case IRQA0:
   452     case IRQA1:
   453     case IRQA2:
   454     case IRQB0:
   455     case IRQB1:
   456     case IRQB2:
   457     case IRQC0:
   458     case IRQC1:
   459     case IRQC2:
   460     case MAPLE_STATE:
   461         val = MMIO_READ(ASIC, reg);
   462         return val;            
   463     case G2STATUS:
   464         return g2_read_status();
   465     default:
   466         val = MMIO_READ(ASIC, reg);
   467         return val;
   468     }
   470 }
   472 MMIO_REGION_READ_DEFSUBFNS(ASIC)
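        /**
         * ASIC register write handler. Writes to the PIRQ registers acknowledge
         * (clear) pending events rather than setting them, writes to the mask
         * registers trigger an interrupt re-evaluation, and writing 1 to the DMA
         * control registers starts the corresponding transfer.
         */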
   474 MMIO_REGION_WRITE_FN( ASIC, reg, val )
   475 {
   476     reg &= 0xFFF;
   477     switch( reg ) {
   478     case PIRQ1:
   479         break; /* Treat this as read-only for the moment */
   480     case PIRQ0:
   481         val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
   482         MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
   483         asic_check_cleared_events();
   484         break;
   485     case PIRQ2:
   486         /* Clear any events */
   487         val = MMIO_READ(ASIC, reg)&(~val);
   488         MMIO_WRITE( ASIC, reg, val );
   489         if( val == 0 ) { /* all clear - clear the cascade bit */
   490             MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
   491         }
   492         asic_check_cleared_events();
   493         break;
   494     case IRQA0:
   495     case IRQA1:
   496     case IRQA2:
   497     case IRQB0:
   498     case IRQB1:
   499     case IRQB2:
   500     case IRQC0:
   501     case IRQC1:
   502     case IRQC2:
   503         MMIO_WRITE( ASIC, reg, val );
   504         asic_event_mask_changed();
   505         break;
   506     case SYSRESET:
   507         if( val == 0x7611 ) {
   508             dreamcast_reset();
   509         } else {
   510             WARN( "Unknown value %08X written to SYSRESET port", val );
   511         }
   512         break;
   513     case MAPLE_STATE:
   514         maple_set_dma_state( val );
   515         break;
   516     case PVRDMADEST:
   517         MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
   518         break;
   519     case PVRDMACNT: 
   520         MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
   521         break;
   522     case PVRDMACTL: /* Initiate PVR DMA transfer */
   523         val = val & 0x01;
   524         MMIO_WRITE( ASIC, reg, val );
   525         if( val == 1 ) {
   526             pvr_dma_transfer();
   527         }
   528         break;
   529     case SORTDMATBL: case SORTDMADATA:
   530         MMIO_WRITE( ASIC, reg, (val & 0x0FFFFFE0) | 0x08000000 );
   531         break;
   532     case SORTDMATSIZ: case SORTDMAASIZ:
   533         MMIO_WRITE( ASIC, reg, (val & 1) );
   534         break;
   535     case SORTDMACTL:
   536         val = val & 1;
   537         MMIO_WRITE( ASIC, reg, val );
   538         if( val == 1 ) {
   539             sort_dma_transfer();
   540         }
   541         break;
   542     case MAPLE_DMA:
   543         MMIO_WRITE( ASIC, reg, val );
   544         break;
   545     default:
   546         MMIO_WRITE( ASIC, reg, val );
   547     }
   548 }
   550 MMIO_REGION_READ_FN( EXTDMA, reg )
   551 {
   552     uint32_t val;
   553     reg &= 0xFFF;
   554     if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
   555         return 0xFFFFFFFF; /* disabled */
   556     }
   558     switch( reg ) {
   559     case IDEALTSTATUS: 
   560         val = idereg.status;
   561         return val;
   562     case IDEDATA: return ide_read_data_pio( );
   563     case IDEFEAT: return idereg.error;
   564     case IDECOUNT:return idereg.count;
   565     case IDELBA0: return ide_get_drive_status();
   566     case IDELBA1: return idereg.lba1;
   567     case IDELBA2: return idereg.lba2;
   568     case IDEDEV: return idereg.device;
   569     case IDECMD:
   570         val = ide_read_status();
   571         return val;
   572     default:
   573         val = MMIO_READ( EXTDMA, reg );
   574         return val;
   575     }
   576 }
   577 MMIO_REGION_READ_DEFSUBFNS(EXTDMA)
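        /**
         * EXTDMA register write handler. IDE register writes are ignored while the
         * IDE interface is disabled; DMA address/size/control registers are masked
         * to their writable bits, and writes to the CTL registers invoke the
         * corresponding transfer routine.
         */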
   580 MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
   581 {
   582     reg &= 0xFFF;
   583     if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
   584         return; /* disabled */
   585     }
   587     switch( reg ) {
   588     case IDEALTSTATUS: /* Device control */
   589         ide_write_control( val );
   590         break;
   591     case IDEDATA:
   592         ide_write_data_pio( val );
   593         break;
   594     case IDEFEAT:
   595         if( ide_can_write_regs() )
   596             idereg.feature = (uint8_t)val;
   597         break;
   598     case IDECOUNT:
   599         if( ide_can_write_regs() )
   600             idereg.count = (uint8_t)val;
   601         break;
   602     case IDELBA0:
   603         if( ide_can_write_regs() )
   604             idereg.lba0 = (uint8_t)val;
   605         break;
   606     case IDELBA1:
   607         if( ide_can_write_regs() )
   608             idereg.lba1 = (uint8_t)val;
   609         break;
   610     case IDELBA2:
   611         if( ide_can_write_regs() )
   612             idereg.lba2 = (uint8_t)val;
   613         break;
   614     case IDEDEV:
   615         if( ide_can_write_regs() )
   616             idereg.device = (uint8_t)val;
   617         break;
   618     case IDECMD:
   619         if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
   620             ide_write_command( (uint8_t)val );
   621         }
   622         break;
   623     case IDEDMASH4:
   624         MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 );
   625         break;
   626     case IDEDMASIZ:
   627         MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
   628         break;
   629     case IDEDMADIR:
   630         MMIO_WRITE( EXTDMA, reg, val & 1 );
   631         break;
   632     case IDEDMACTL1:
   633     case IDEDMACTL2:
   634         MMIO_WRITE( EXTDMA, reg, val & 0x01 );
   635         asic_ide_dma_transfer( );
   636         break;
   637     case IDEACTIVATE:
   638         if( val == 0x001FFFFF ) {
   639             idereg.interface_enabled = TRUE;
   640             /* Conventional wisdom says that this is necessary but not
   641              * sufficient to enable the IDE interface.
   642              */
   643         } else if( val == 0x000042FE ) {
   644             idereg.interface_enabled = FALSE;
   645         }
   646         break;
   647     case G2DMA0EXT: case G2DMA0SH4: case G2DMA0SIZ:
   648     case G2DMA1EXT: case G2DMA1SH4: case G2DMA1SIZ:
   649     case G2DMA2EXT: case G2DMA2SH4: case G2DMA2SIZ:
   650     case G2DMA3EXT: case G2DMA3SH4: case G2DMA3SIZ:
   651         MMIO_WRITE( EXTDMA, reg, val & 0x9FFFFFE0 );
   652         break;
   653     case G2DMA0MOD: case G2DMA1MOD: case G2DMA2MOD: case G2DMA3MOD:
   654         MMIO_WRITE( EXTDMA, reg, val & 0x07 );
   655         break;
   656     case G2DMA0DIR: case G2DMA1DIR: case G2DMA2DIR: case G2DMA3DIR:
   657         MMIO_WRITE( EXTDMA, reg, val & 0x01 );
   658         break;
   659     case G2DMA0CTL1:
   660     case G2DMA0CTL2:
   661         MMIO_WRITE( EXTDMA, reg, val & 1);
   662         g2_dma_transfer( 0 );
   663         break;
   664     case G2DMA0STOP:
   665         MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   666         break;
   667     case G2DMA1CTL1:
   668     case G2DMA1CTL2:
   669         MMIO_WRITE( EXTDMA, reg, val & 1);
   670         g2_dma_transfer( 1 );
   671         break;
   673     case G2DMA1STOP:
   674         MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   675         break;
   676     case G2DMA2CTL1:
   677     case G2DMA2CTL2:
   678         MMIO_WRITE( EXTDMA, reg, val &1 );
   679         g2_dma_transfer( 2 );
   680         break;
   681     case G2DMA2STOP:
   682         MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   683         break;
   684     case G2DMA3CTL1:
   685     case G2DMA3CTL2:
   686         MMIO_WRITE( EXTDMA, reg, val &1 );
   687         g2_dma_transfer( 3 );
   688         break;
   689     case G2DMA3STOP:
   690         MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   691         break;
   692     case PVRDMA2CTL1:
   693     case PVRDMA2CTL2:
   694         MMIO_WRITE( EXTDMA, reg, val & 1 );
   695         pvr_dma2_transfer();
   696         break;
   697     default:
   698         MMIO_WRITE( EXTDMA, reg, val );
   699     }
   700 }