lxdream.org :: lxdream/src/asic.c
filename src/asic.c
changeset 594:6118deafd705
prev 586:2a3ba82cf243
next 728:4dfc293b9d96
author nkeynes
date Sat Jan 26 03:45:49 2008 +0000 (16 years ago)
permissions -rw-r--r--
last change Ignore Numlock, Capslock, etc when checking for grab exit
     1 /**
     2  * $Id$
     3  *
     4  * Support for the miscellaneous ASIC functions (Primarily event multiplexing,
     5  * and DMA). 
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #define MODULE asic_module
    22 #include <assert.h>
    23 #include <stdlib.h>
    24 #include "dream.h"
    25 #include "mem.h"
    26 #include "sh4/intc.h"
    27 #include "sh4/dmac.h"
    28 #include "sh4/sh4.h"
    29 #include "dreamcast.h"
    30 #include "maple/maple.h"
    31 #include "gdrom/ide.h"
    32 #include "pvr2/pvr2.h"
    33 #include "asic.h"
    34 #define MMIO_IMPL
    35 #include "asic.h"
    36 /*
    37  * Open questions:
     38  *   1) Does changing the mask after an event has occurred result in the
    39  *      interrupt being delivered immediately?
    40  * TODO: Logic diagram of ASIC event/interrupt logic.
    41  *
    42  * ... don't even get me started on the "EXTDMA" page, about which, apparently,
    43  * practically nothing is publicly known...
    44  */
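/*
 * Illustrative sketch (not part of the original module): how pending events
 * are multiplexed onto the three SH4 interrupt levels by asic_event() and
 * asic_check_cleared_events() below. Each pending bit in PIRQ0..2 is ANDed
 * with the corresponding enable bit in IRQA0..2 / IRQB0..2 / IRQC0..2, and
 * any surviving bit asserts IRQ13, IRQ11 or IRQ9 respectively. The demo_*
 * names are invented for this sketch only.
 */
#if 0 /* illustration only - not compiled */
#include <stdint.h>

struct demo_asic_regs {
    uint32_t pirq[3];   /* latched event bits (PIRQ0..2)      */
    uint32_t irqa[3];   /* enables routed to SH4 IRQ level 13 */
    uint32_t irqb[3];   /* enables routed to SH4 IRQ level 11 */
    uint32_t irqc[3];   /* enables routed to SH4 IRQ level 9  */
};

/* Returns a 3-bit mask: bit 2 = IRQ13 asserted, bit 1 = IRQ11, bit 0 = IRQ9 */
static int demo_asic_irq_lines( const struct demo_asic_regs *r )
{
    uint32_t a = 0, b = 0, c = 0;
    int i;
    for( i = 0; i < 3; i++ ) {
        a |= r->pirq[i] & r->irqa[i];
        b |= r->pirq[i] & r->irqb[i];
        c |= r->pirq[i] & r->irqc[i];
    }
    return ((a != 0) << 2) | ((b != 0) << 1) | (c != 0);
}
#endif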
    46 static void asic_check_cleared_events( void );
    47 static void asic_init( void );
    48 static void asic_reset( void );
    49 static uint32_t asic_run_slice( uint32_t nanosecs );
    50 static void asic_save_state( FILE *f );
    51 static int asic_load_state( FILE *f );
    52 static uint32_t g2_update_fifo_status( uint32_t slice_cycle );
    54 struct dreamcast_module asic_module = { "ASIC", asic_init, asic_reset, NULL, asic_run_slice,
    55 					NULL, asic_save_state, asic_load_state };
    57 #define G2_BIT5_TICKS 60
    58 #define G2_BIT4_TICKS 160
    59 #define G2_BIT0_ON_TICKS 120
    60 #define G2_BIT0_OFF_TICKS 420
    62 struct asic_g2_state {
    63     int bit5_off_timer;
    64     int bit4_on_timer;
    65     int bit4_off_timer;
    66     int bit0_on_timer;
    67     int bit0_off_timer;
    68 };
    70 static struct asic_g2_state g2_state;
    72 static uint32_t asic_run_slice( uint32_t nanosecs )
    73 {
    74     g2_update_fifo_status(nanosecs);
    75     if( g2_state.bit5_off_timer <= (int32_t)nanosecs ) {
    76 	g2_state.bit5_off_timer = -1;
    77     } else {
    78 	g2_state.bit5_off_timer -= nanosecs;
    79     }
    81     if( g2_state.bit4_off_timer <= (int32_t)nanosecs ) {
    82 	g2_state.bit4_off_timer = -1;
    83     } else {
    84 	g2_state.bit4_off_timer -= nanosecs;
    85     }
    86     if( g2_state.bit4_on_timer <= (int32_t)nanosecs ) {
    87 	g2_state.bit4_on_timer = -1;
    88     } else {
    89 	g2_state.bit4_on_timer -= nanosecs;
    90     }
    92     if( g2_state.bit0_off_timer <= (int32_t)nanosecs ) {
    93 	g2_state.bit0_off_timer = -1;
    94     } else {
    95 	g2_state.bit0_off_timer -= nanosecs;
    96     }
    97     if( g2_state.bit0_on_timer <= (int32_t)nanosecs ) {
    98 	g2_state.bit0_on_timer = -1;
    99     } else {
   100 	g2_state.bit0_on_timer -= nanosecs;
   101     }
   103     return nanosecs;
   104 }
   106 static void asic_init( void )
   107 {
   108     register_io_region( &mmio_region_ASIC );
   109     register_io_region( &mmio_region_EXTDMA );
   110     asic_reset();
   111 }
   113 static void asic_reset( void )
   114 {
   115     memset( &g2_state, 0xFF, sizeof(g2_state) );
   116 }    
   118 static void asic_save_state( FILE *f )
   119 {
   120     fwrite( &g2_state, sizeof(g2_state), 1, f );
   121 }
   123 static int asic_load_state( FILE *f )
   124 {
   125     if( fread( &g2_state, sizeof(g2_state), 1, f ) != 1 )
   126 	return 1;
   127     else
   128 	return 0;
   129 }
   132 /**
   133  * Set up the timers for the 3 FIFO status bits following a write through the G2
   134  * bus from the SH4 side. The timing is roughly as follows: (times are
   135  * approximate based on software readings - I wouldn't take this as gospel but
   136  * it seems to be enough to fool most programs). 
   137  *    0ns: Bit 5 (Input fifo?) goes high immediately on the write
   138  *   40ns: Bit 5 goes low and bit 4 goes high
   139  *  120ns: Bit 4 goes low, bit 0 goes high
   140  *  240ns: Bit 0 goes low.
   141  *
   142  * Additional writes while the FIFO is in operation extend the time that the
   143  * bits remain high as one might expect, without altering the time at which
   144  * they initially go high.
   145  */
   146 void asic_g2_write_word()
   147 {
   148     if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
   149 	g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
   150     } else {
   151 	g2_state.bit5_off_timer += G2_BIT5_TICKS;
   152     }
   154     if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
   155 	g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
   156     }
   158     if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
   159 	g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
   160     } else {
   161 	g2_state.bit4_off_timer += G2_BIT4_TICKS;
   162     }
   164     if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
   165 	g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
   166     }
   168     if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
   169 	g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
   170     } else {
   171 	g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
   172     }
   174     MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
   175 }
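/*
 * Illustrative sketch (not part of the original module): the G2STATUS
 * timeline that a single isolated write produces under the constants above,
 * taking the write time as t=0. Bit 5 is set immediately and cleared after
 * G2_BIT5_TICKS, bit 4 is set when bit 5 clears and held for G2_BIT4_TICKS,
 * and bit 0 is set at G2_BIT0_ON_TICKS and held for G2_BIT0_OFF_TICKS.
 * demo_g2_status_at() is an invented name, used only for this sketch.
 */
#if 0 /* illustration only - not compiled */
static uint32_t demo_g2_status_at( uint32_t t_ns )
{
    uint32_t status = 0;
    if( t_ns < G2_BIT5_TICKS )                              /* [0,60)    */
        status |= 0x20;
    if( t_ns >= G2_BIT5_TICKS &&
        t_ns < G2_BIT5_TICKS + G2_BIT4_TICKS )              /* [60,220)  */
        status |= 0x10;
    if( t_ns >= G2_BIT0_ON_TICKS &&
        t_ns < G2_BIT0_ON_TICKS + G2_BIT0_OFF_TICKS )       /* [120,540) */
        status |= 0x01;
    return status;
}
#endif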
   177 static uint32_t g2_update_fifo_status( uint32_t nanos )
   178 {
   179     uint32_t val = MMIO_READ( ASIC, G2STATUS );
   180     if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
   181 	val = val & (~0x20);
   182 	g2_state.bit5_off_timer = -1;
   183     }
   184     if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
   185 	val = val | 0x10;
   186 	g2_state.bit4_on_timer = -1;
   187     }
   188     if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
   189 	val = val & (~0x10);
   190 	g2_state.bit4_off_timer = -1;
   191     } 
   193     if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
   194 	val = val | 0x01;
   195 	g2_state.bit0_on_timer = -1;
   196     }
   197     if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
   198 	val = val & (~0x01);
   199 	g2_state.bit0_off_timer = -1;
   200     } 
   202     MMIO_WRITE( ASIC, G2STATUS, val );
   203     return val;
   204 }   
   206 static int g2_read_status() {
   207     return g2_update_fifo_status( sh4r.slice_cycle );
   208 }
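/*
 * Event numbers 0..95 map onto three 32-bit pending words: (event & 0x60) >> 3
 * gives the byte offset (0, 4 or 8) of the PIRQ/IRQ register word, and
 * 1 << (event & 0x1F) selects the bit within that word. For example, event
 * 0x21 (33) lands in PIRQ1 at bit 1. Events in the second and third words
 * also latch a cascade bit in PIRQ0 (EVENT_CASCADE1/EVENT_CASCADE2), which
 * the clear path below removes again once the whole word is clear.
 */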
   211 void asic_event( int event )
   212 {
   213     int offset = ((event&0x60)>>3);
   214     int result = (MMIO_READ(ASIC, PIRQ0 + offset))  |=  (1<<(event&0x1F));
   216     if( result & MMIO_READ(ASIC, IRQA0 + offset) )
   217         intc_raise_interrupt( INT_IRQ13 );
   218     if( result & MMIO_READ(ASIC, IRQB0 + offset) )
   219         intc_raise_interrupt( INT_IRQ11 );
   220     if( result & MMIO_READ(ASIC, IRQC0 + offset) )
   221         intc_raise_interrupt( INT_IRQ9 );
   223     if( event >= 64 ) { /* Third word */
   224 	asic_event( EVENT_CASCADE2 );
   225     } else if( event >= 32 ) { /* Second word */
   226 	asic_event( EVENT_CASCADE1 );
   227     }
   228 }
   230 void asic_clear_event( int event ) {
   231     int offset = ((event&0x60)>>3);
   232     uint32_t result = MMIO_READ(ASIC, PIRQ0 + offset)  & (~(1<<(event&0x1F)));
   233     MMIO_WRITE( ASIC, PIRQ0 + offset, result );
   234     if( result == 0 ) {
   235 	/* clear cascades if necessary */
   236 	if( event >= 64 ) {
   237 	    MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
   238 	} else if( event >= 32 ) {
   239 	    MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0xBFFFFFFF );
   240 	}
   241     }
   243     asic_check_cleared_events();
   244 }
   246 void asic_check_cleared_events( )
   247 {
   248     int i, setA = 0, setB = 0, setC = 0;
   249     uint32_t bits;
   250     for( i=0; i<12; i+=4 ) {
   251 	bits = MMIO_READ( ASIC, PIRQ0 + i );
   252 	setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   253 	setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   254 	setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   255     }
   256     if( setA == 0 )
   257 	intc_clear_interrupt( INT_IRQ13 );
   258     if( setB == 0 )
   259 	intc_clear_interrupt( INT_IRQ11 );
   260     if( setC == 0 )
   261 	intc_clear_interrupt( INT_IRQ9 );
   262 }
   264 void asic_event_mask_changed( )
   265 {
   266     int i, setA = 0, setB = 0, setC = 0;
   267     uint32_t bits;
   268     for( i=0; i<12; i+=4 ) {
   269 	bits = MMIO_READ( ASIC, PIRQ0 + i );
   270 	setA |= (bits & MMIO_READ(ASIC, IRQA0 + i ));
   271 	setB |= (bits & MMIO_READ(ASIC, IRQB0 + i ));
   272 	setC |= (bits & MMIO_READ(ASIC, IRQC0 + i ));
   273     }
   274     if( setA == 0 ) 
   275 	intc_clear_interrupt( INT_IRQ13 );
   276     else
   277 	intc_raise_interrupt( INT_IRQ13 );
   278     if( setB == 0 )
   279 	intc_clear_interrupt( INT_IRQ11 );
   280     else
   281 	intc_raise_interrupt( INT_IRQ11 );
   282     if( setC == 0 )
   283 	intc_clear_interrupt( INT_IRQ9 );
   284     else
   285 	intc_raise_interrupt( INT_IRQ9 );
   286 }
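/*
 * G2 DMA channel registers are spaced 0x20 bytes apart, hence
 * offset = channel << 5. A transfer runs only when both CTL1 and CTL2 read 1:
 * the (masked) length is staged through a temporary buffer and copied between
 * the SH4-side address and the external G2 address in the direction given by
 * the DIR register, CTL2 is cleared, and the per-channel completion event is
 * raised.
 */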
   288 void g2_dma_transfer( int channel )
   289 {
   290     uint32_t offset = channel << 5;
   292     if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) {
   293 	if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) {
   294 	    uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset );
   295 	    uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset );
   296 	    uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF;
   297 	    uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset );
   298 	    // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset );
   299 	    unsigned char buf[length];
   300 	    if( dir == 0 ) { /* SH4 to device */
   301 		mem_copy_from_sh4( buf, sh4addr, length );
   302 		mem_copy_to_sh4( extaddr, buf, length );
   303 	    } else { /* Device to SH4 */
   304 		mem_copy_from_sh4( buf, extaddr, length );
   305 		mem_copy_to_sh4( sh4addr, buf, length );
   306 	    }
   307 	    MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   308 	    asic_event( EVENT_G2_DMA0 + channel );
   309 	} else {
   310 	    MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 );
   311 	}
   312     }
   313 }
   315 void asic_ide_dma_transfer( )
   316 {	
   317     if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) {
   318 	if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) {
   319 	    MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 );
   321 	    uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 );
   322 	    uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ );
   323 	    // int dir = MMIO_READ( EXTDMA, IDEDMADIR );
   325 	    uint32_t xfer = ide_read_data_dma( addr, length );
   326 	    MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer );
   327 	    MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   328 	} else { /* 0 */
   329 	    MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 );
   330 	}
   331     }
   332 }
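/*
 * PVR DMA pulls PVRDMACNT bytes from SH4 DMAC channel 2 and writes them to
 * the PVR2 at PVRDMADEST (masked to a 32-byte-aligned physical address). On
 * completion the control and count registers are cleared, the destination
 * register is advanced when the target lies in texture RAM, and
 * EVENT_PVR_DMA is raised.
 */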
   334 void pvr_dma_transfer( )
   335 {
   336     sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
   337     uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
   338     unsigned char *data = alloca( count );
   339     uint32_t rcount = DMAC_get_buffer( 2, data, count );
   340     if( rcount != count )
   341 	WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, count );
   343     pvr2_dma_write( destaddr, data, rcount );
   345     MMIO_WRITE( ASIC, PVRDMACTL, 0 );
   346     MMIO_WRITE( ASIC, PVRDMACNT, 0 );
   347     if( destaddr & 0x01000000 ) { /* Write to texture RAM */
   348 	MMIO_WRITE( ASIC, PVRDMADEST, destaddr + rcount );
   349     }
   350     asic_event( EVENT_PVR_DMA );
   351 }
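/*
 * Note on the PIRQ write semantics below: the pending registers are
 * write-one-to-clear, i.e. a 1 bit in the written value clears the matching
 * latched event. The top two bits of PIRQ0 are the cascade summary bits for
 * PIRQ1/PIRQ2 and are not directly clearable by software; they drop only
 * when the corresponding word becomes empty.
 */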
   353 void mmio_region_ASIC_write( uint32_t reg, uint32_t val )
   354 {
   355     switch( reg ) {
   356     case PIRQ1:
   357 	break; /* Treat this as read-only for the moment */
   358     case PIRQ0:
   359 	val = val & 0x3FFFFFFF; /* Top two bits aren't clearable */
   360 	MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
   361 	asic_check_cleared_events();
   362 	break;
   363     case PIRQ2:
   364 	/* Clear any events */
   365 	val = MMIO_READ(ASIC, reg)&(~val);
   366 	MMIO_WRITE( ASIC, reg, val );
   367 	if( val == 0 ) { /* all clear - clear the cascade bit */
   368 	    MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
   369 	}
   370 	asic_check_cleared_events();
   371 	break;
   372     case IRQA0:
   373     case IRQA1:
   374     case IRQA2:
   375     case IRQB0:
   376     case IRQB1:
   377     case IRQB2:
   378     case IRQC0:
   379     case IRQC1:
   380     case IRQC2:
   381 	MMIO_WRITE( ASIC, reg, val );
   382 	asic_event_mask_changed();
   383 	break;
   384     case SYSRESET:
   385 	if( val == 0x7611 ) {
   386 	    dreamcast_reset();
   387 	} else {
   388 	    WARN( "Unknown value %08X written to SYSRESET port", val );
   389 	}
   390 	break;
   391     case MAPLE_STATE:
   392 	MMIO_WRITE( ASIC, reg, val );
   393 	if( val & 1 ) {
   394 	    uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA) &0x1FFFFFE0;
   395 	    maple_handle_buffer( maple_addr );
   396 	    MMIO_WRITE( ASIC, reg, 0 );
   397 	}
   398 	break;
   399     case PVRDMADEST:
   400 	MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
   401 	break;
   402     case PVRDMACNT: 
   403 	MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
   404 	break;
   405     case PVRDMACTL: /* Initiate PVR DMA transfer */
   406 	val = val & 0x01;
   407 	MMIO_WRITE( ASIC, reg, val );
   408 	if( val == 1 ) {
   409 	    pvr_dma_transfer();
   410 	}
   411 	break;
   413     case MAPLE_DMA:
   414 	MMIO_WRITE( ASIC, reg, val );
   415 	break;
   416     default:
   417 	MMIO_WRITE( ASIC, reg, val );
   418     }
   419 }
   421 int32_t mmio_region_ASIC_read( uint32_t reg )
   422 {
   423     int32_t val;
   424     switch( reg ) {
   425     case PIRQ0:
   426     case PIRQ1:
   427     case PIRQ2:
   428     case IRQA0:
   429     case IRQA1:
   430     case IRQA2:
   431     case IRQB0:
   432     case IRQB1:
   433     case IRQB2:
   434     case IRQC0:
   435     case IRQC1:
   436     case IRQC2:
   437     case MAPLE_STATE:
   438 	val = MMIO_READ(ASIC, reg);
   439 	return val;            
   440     case G2STATUS:
   441 	return g2_read_status();
   442     default:
   443 	val = MMIO_READ(ASIC, reg);
   444 	return val;
   445     }
   447 }
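/*
 * The EXTDMA page multiplexes the GD-ROM IDE register file with the four G2
 * DMA channels and the IDE DMA engine. Accesses to the IDE registers are
 * gated by idereg.interface_enabled, which is toggled by the magic values
 * written to IDEACTIVATE below.
 */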
   449 MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
   450 {
   451     if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
   452 	return; /* disabled */
   453     }
   455     switch( reg ) {
   456     case IDEALTSTATUS: /* Device control */
   457 	ide_write_control( val );
   458 	break;
   459     case IDEDATA:
   460 	ide_write_data_pio( val );
   461 	break;
   462     case IDEFEAT:
   463 	if( ide_can_write_regs() )
   464 	    idereg.feature = (uint8_t)val;
   465 	break;
   466     case IDECOUNT:
   467 	if( ide_can_write_regs() )
   468 	    idereg.count = (uint8_t)val;
   469 	break;
   470     case IDELBA0:
   471 	if( ide_can_write_regs() )
   472 	    idereg.lba0 = (uint8_t)val;
   473 	break;
   474     case IDELBA1:
   475 	if( ide_can_write_regs() )
   476 	    idereg.lba1 = (uint8_t)val;
   477 	break;
   478     case IDELBA2:
   479 	if( ide_can_write_regs() )
   480 	    idereg.lba2 = (uint8_t)val;
   481 	break;
   482     case IDEDEV:
   483 	if( ide_can_write_regs() )
   484 	    idereg.device = (uint8_t)val;
   485 	break;
   486     case IDECMD:
   487 	if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
   488 	    ide_write_command( (uint8_t)val );
   489 	}
   490 	break;
   491     case IDEDMASH4:
   492 	MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 );
   493 	break;
   494     case IDEDMASIZ:
   495 	MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
   496 	break;
   497     case IDEDMADIR:
   498 	MMIO_WRITE( EXTDMA, reg, val & 1 );
   499 	break;
   500     case IDEDMACTL1:
   501     case IDEDMACTL2:
   502 	MMIO_WRITE( EXTDMA, reg, val & 0x01 );
   503 	asic_ide_dma_transfer( );
   504 	break;
   505     case IDEACTIVATE:
   506 	if( val == 0x001FFFFF ) {
   507 	    idereg.interface_enabled = TRUE;
   508 	    /* Conventional wisdom says that this is necessary but not
   509 	     * sufficient to enable the IDE interface.
   510 	     */
   511 	} else if( val == 0x000042FE ) {
   512 	    idereg.interface_enabled = FALSE;
   513 	}
   514 	break;
   515     case G2DMA0EXT: case G2DMA0SH4: case G2DMA0SIZ:
   516     case G2DMA1EXT: case G2DMA1SH4: case G2DMA1SIZ:
   517     case G2DMA2EXT: case G2DMA2SH4: case G2DMA2SIZ:
   518     case G2DMA3EXT: case G2DMA3SH4: case G2DMA3SIZ:
   519 	MMIO_WRITE( EXTDMA, reg, val & 0x9FFFFFE0 );
   520 	break;
   521     case G2DMA0MOD: case G2DMA1MOD: case G2DMA2MOD: case G2DMA3MOD:
   522 	MMIO_WRITE( EXTDMA, reg, val & 0x07 );
   523 	break;
   524     case G2DMA0DIR: case G2DMA1DIR: case G2DMA2DIR: case G2DMA3DIR:
   525 	MMIO_WRITE( EXTDMA, reg, val & 0x01 );
   526 	break;
   527     case G2DMA0CTL1:
   528     case G2DMA0CTL2:
   529 	MMIO_WRITE( EXTDMA, reg, val & 1);
   530 	g2_dma_transfer( 0 );
   531 	break;
   532     case G2DMA0STOP:
   533 	MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   534 	break;
   535     case G2DMA1CTL1:
   536     case G2DMA1CTL2:
   537 	MMIO_WRITE( EXTDMA, reg, val & 1);
   538 	g2_dma_transfer( 1 );
   539 	break;
   541     case G2DMA1STOP:
   542 	MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   543 	break;
   544     case G2DMA2CTL1:
   545     case G2DMA2CTL2:
   546 	MMIO_WRITE( EXTDMA, reg, val &1 );
   547 	g2_dma_transfer( 2 );
   548 	break;
   549     case G2DMA2STOP:
   550 	MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   551 	break;
   552     case G2DMA3CTL1:
   553     case G2DMA3CTL2:
   554 	MMIO_WRITE( EXTDMA, reg, val &1 );
   555 	g2_dma_transfer( 3 );
   556 	break;
   557     case G2DMA3STOP:
   558 	MMIO_WRITE( EXTDMA, reg, val & 0x37 );
   559 	break;
   560     case PVRDMA2CTL1:
   561     case PVRDMA2CTL2:
   562 	if( val != 0 ) {
   563 	    ERROR( "Write to unimplemented DMA control register %08X", reg );
   564 	}
   565 	break;
   566     default:
   567             MMIO_WRITE( EXTDMA, reg, val );
   568     }
   569 }
   571 MMIO_REGION_READ_FN( EXTDMA, reg )
   572 {
   573     uint32_t val;
   574     if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
   575 	return 0xFFFFFFFF; /* disabled */
   576     }
   578     switch( reg ) {
   579     case IDEALTSTATUS: 
   580 	val = idereg.status;
   581 	return val;
   582     case IDEDATA: return ide_read_data_pio( );
   583     case IDEFEAT: return idereg.error;
   584     case IDECOUNT:return idereg.count;
   585     case IDELBA0: return ide_get_drive_status();
   586     case IDELBA1: return idereg.lba1;
   587     case IDELBA2: return idereg.lba2;
   588     case IDEDEV: return idereg.device;
   589     case IDECMD:
   590 	val = ide_read_status();
   591 	return val;
   592     default:
   593 	val = MMIO_READ( EXTDMA, reg );
   594 	return val;
   595     }
   596 }