lxdream.org :: lxdream/src/sh4/mmu.c
filename    src/sh4/mmu.c
changeset   597:87cbdf62aa35
prev        586:2a3ba82cf243
next        669:ab344e42bca9
author      nkeynes
date        Tue Jan 22 09:45:21 2008 +0000
permissions -rw-r--r--
last change Initial VMA support for the SH4 disassembly

/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
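
/* Example: P1/P2 addresses map to external addresses by dropping the top
 * three bits, e.g. VMA_TO_EXT_ADDR(0x8C0010A0) == 0x0C0010A0 (P1 -> main
 * RAM) and VMA_TO_EXT_ADDR(0xA0000000) == 0x00000000 (P2 -> BIOS ROM).
 */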

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately. The macros are wrapped in do { } while(0)
 * so that they expand safely as single statements.
 */
#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
} while(0)
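
/* Worked example: RAISE_TLB_ERROR(EXC_TLB_MISS_READ, 0x0C123456) leaves
 *   TEA  == 0x0C123456                          (full faulting address)
 *   PTEH == (PTEH & 0x000003FF) | 0x0C123400    (VPN replaced, ASID kept)
 * before the exception is raised.
 */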

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K:  return MASK_1K;
    case TLB_SIZE_4K:  return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M:  return MASK_1M;
    default: return MASK_1K; /* unreachable - TLB_SIZE_MASK admits only the four cases above */
    }
}
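
/* Example: flags of 0x116 (valid, 4K, dirty, shared) give
 * 0x116 & TLB_SIZE_MASK == TLB_SIZE_4K, so get_mask_for_flags() returns
 * MASK_4K and the low 12 bits of an address index within the page.
 */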

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR ) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
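
/* Example: with MMUCR.AT set (0x001), urc == 0x05, urb == 0x20 and
 * lrui == 0x38, the composite value read back is
 *   0x001 | (0x05<<10) | (0x20<<18) | (0x38<<26) == 0xE0801401
 */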

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
            // The AT flag has changed state - flush the translation cache as
            // all bets are off now. We also need to force an immediate exit
            // from the current block.
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_translate_flush_cache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 - index on address bit 13 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 - index on address bit 25 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + (((i<<PAGE_BITS)&0x02000000)>>(25-PAGE_BITS));
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}
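
/* Assuming PAGE_BITS == 12, both index modes pick one 4K half of the 8K
 * cache buffer: OIX=0 selects on address bit 13 (page-number bit 1), while
 * OIX=1 selects on address bit 25. For example, 0x1C000000 and 0x1C002000
 * land in different halves under OIX=0 but the same half under OIX=1.
 */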

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
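
/* Worked example: with MMUCR.URC == 3, PTEH == 0x0C123412 and
 * PTEL == 0x0C456116, LDTLB loads UTLB entry 3 as
 *   vpn = 0x0C123400, asid = 0x12, ppn = 0x0C456000, flags = 0x116
 * and mask = MASK_4K (from the size bits in flags).
 */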

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
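
/* Example: an address-array write to 0xF6003F80 decodes as UTLB entry 0x3F
 * (bits 13:8) with the associative bit (0x80) set, while 0xF7800000 in the
 * data array has bit 23 set and therefore addresses data array 2 (pcmcia).
 */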

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but does not update the URC counter.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but does not update the LRUI state.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}
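
/* Example: an associative write of val == 0x0C123312 searches the UTLB (and
 * ITLB) for vpn 0x0C123000 under the current ASID; on a hit, the entry's V
 * bit is rewritten from val bit 8 (0x100) and its D bit from val bit 9
 * ((0x200>>7) == TLB_DIRTY).
 */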

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a TLB data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
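
/* Example of the match test: an entry with vpn == 0x0C400000 and
 * mask == MASK_1M matches every address in 0x0C400000..0x0C4FFFFF, since
 *   (0x0C400000 ^ 0x0C4A5678) & 0xFFF00000 == 0x000A5678 & 0xFFF00000 == 0
 */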

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a TLB data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
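
/* The 6 LRUI bits track the pairwise access order of the four ITLB entries.
 * For example, lrui == 0x38 (bits 5:3 set) means entry 0 is the least
 * recently used, so it is replaced and bits 5:3 are then cleared to mark it
 * most recently used.
 */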

/**
 * Perform the actual itlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a TLB miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a TLB miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}
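
/* Worked example (hypothetical entry): reading P0 address 0x0C4A5678 with
 * AT enabled and a hit on { ppn = 0x0D100000, mask = MASK_1M } returns
 *   (0x0D100000 & 0xFFF00000) | (0x0C4A5678 & 0x000FFFFF) == 0x0D1A5678
 */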

sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
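
/* Example: addr == 0x8C0040A0 satisfies (addr & 0x1C000000) == 0x0C000000,
 * so the icache window covers the whole 16MB main-ram mirror:
 *   page_vma = 0x8C000000, page_ppa = 0x0C000000, mask = 0xFF000000
 */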

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally (hence it
 * should only be used immediately prior to executing code), and otherwise
 * will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( mmucr & MMUCR_SV ) {
            entryNo = mmu_itlb_lookup_vpn( addr );
        } else {
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        }
        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate an address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}

gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2; // addr bit 5 selects SQ0/SQ1 (8 words apart)
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                      (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
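
/* Worked example (AT disabled): flushing via address 0xE4000020 selects SQ1
 * (addr bit 5); with QACR1 == 0x10 the 32-byte burst goes to
 *   (0xE4000020 & 0x03FFFFE0) | ((0x10 & 0x1C) << 24) == 0x10000020
 */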