filename    src/sh4/mmu.c
changeset   571:9bc09948d0f2
prev        570:d2893980fbf5
next        577:a181aeacd6e8
author      nkeynes
date        Mon Jan 14 09:08:58 2008 +0000
branch      lxdream-mmu
permissions -rw-r--r--
last change Fix TRAPA in emulator core

/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
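/* A minimal sketch of that calling convention (MMU_VMA_ERROR is defined
 * elsewhere in the sh4 headers):
 *
 *     sh4addr_t phys = mmu_vma_to_phys_read( vma );
 *     if( phys == MMU_VMA_ERROR )
 *         return;  exception state was already set up by the macros below
 */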
#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
} while(0)

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
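
/**
 * Return the address mask selected by the page-size bits of a TLB flags
 * word (1K, 4K, 64K or 1M pages).
 */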
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return MASK_1K; /* unreachable - the four cases above cover
                              * every combination of the TLB_SIZE_MASK bits */
    }
}
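
/**
 * Read one of the MMU control registers. MMUCR is reassembled on the fly
 * from the separately tracked URC/URB/LRUI fields; everything else comes
 * straight from the MMIO store.
 */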
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
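
/**
 * Write one of the MMU control registers, applying the appropriate
 * read-only masks. A change of PTEH.ASID invalidates the icache mapping,
 * and toggling MMUCR.AT flushes the translation cache.
 */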
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_translate_flush_cache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
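
/**
 * One-time initialization: allocate the backing store for the on-chip
 * operand-cache RAM (two host pages, matching the 2 x 4096 bytes that
 * MMU_save_state writes out below).
 */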
void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}
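
/**
 * Map (or unmap) the operand-cache RAM region at 0x1C000000-0x1FFFFFFF in
 * the page table. OIX=0 banks the cache RAM on page-index bit 1, OIX=1 on
 * address bit 25; when OC RAM mode is disabled the region is unmapped
 * entirely.
 */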
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                /* bank select = address bit 25 = page-index bit 25-PAGE_BITS */
                page_map[i] = cache + (((i>>(25-PAGE_BITS))&0x01)<<PAGE_BITS);
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}
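
/* Address-array and data-array access to the ITLB/UTLB as exposed through
 * the P4 MMIO region; the entry index is encoded in the access address. */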
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
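
/**
 * Write to the UTLB address array. With the associative bit set this is an
 * associative store: the V and D bits of any matching UTLB entry (and the
 * V bit of any matching ITLB entry) are updated, and a multiple hit raises
 * the multi-hit reset exception. Otherwise the indexed entry is written
 * directly.
 */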
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
        int utlb = mmu_utlb_lookup_assoc( val, asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */
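/* Rough flow of a translated data access, as implemented below:
 *   1. P4 and P1/P2 addresses (and permitted user-mode store-queue
 *      accesses) short-circuit without translation.
 *   2. With MMUCR.AT clear, the VMA maps directly to the external address.
 *   3. Otherwise the UTLB is searched - with or without ASID matching,
 *      depending on MMUCR.SV and the processor mode - protection and dirty
 *      bits are checked, and the PPN replaces the masked VPN bits.
 */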

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual itlb lookup w/ asid protection.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry; /* propagate miss (-1) or multi-hit (-2) */
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry; /* propagate miss (-1) or multi-hit (-2) */
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
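
/**
 * Translate a virtual address for a data read. Raises the appropriate
 * address-error, TLB-miss, multi-hit or protection exception on failure
 * and returns MMU_VMA_ERROR; otherwise returns the external address.
 */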
sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}
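
/**
 * Translate a virtual address for a data write. As for reads, but also
 * enforces writability, and a write to a clean page raises the
 * initial-page-write exception so the OS can set the dirty bit.
 */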
sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method raises TLB exceptions normally (hence it should
 * only be used immediately prior to executing code), and sets the icache
 * according to the matching TLB entry.
 * If AT is off, this method sets the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( mmucr & MMUCR_SV ) {
            entryNo = mmu_itlb_lookup_vpn( addr );
        } else {
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        }
        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}