filename src/sh4/mmu.c
changeset 807:1ca418e6ed5d
prev 796:a2dc83592467
next 810:833cc4960556
author nkeynes
date Sat Aug 09 07:39:47 2008 +0000
permissions -rw-r--r--
last change Fix ITLB lookup ASID behaviour (was somewhat back-to-front)
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
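
/* Illustrative usage sketch (added; not part of the original file): callers
 * are expected to test the translation result against MMU_VMA_ERROR and stop
 * immediately, since the macros above have already raised the exception.
 * A hypothetical read helper - example_read_long and mem_read_long are
 * assumed names, purely for illustration - might look like:
 *
 *     int32_t example_read_long( sh4vma_t vma )
 *     {
 *         sh4addr_t phys = mmu_vma_to_phys_read( vma );
 *         if( phys == MMU_VMA_ERROR )
 *             return 0; // exception state already set; caller unwinds
 *         return mem_read_long( phys );
 *     }
 */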

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
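
/* Worked example (added for illustration): a 64K page entry has
 * mask == MASK_64K == 0xFFFF0000, so an entry with vpn = 0x7FC10000 matches
 * address 0x7FC1A604 because ((0x7FC10000 ^ 0x7FC1A604) & 0xFFFF0000) == 0;
 * the low 16 bits of the address become the offset within the page.
 */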

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
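
/* MMUCR bit layout, per the SH7750 hardware manual (comment added for
 * reference):
 *   bit 0      AT   - address translation enable
 *   bit 2      TI   - TLB invalidate (write-only; reads as 0)
 *   bit 8      SV   - single virtual memory mode
 *   bit 9      SQMD - store queue mode (user access disable)
 *   bits 10-15 URC  - UTLB replace counter
 *   bits 18-23 URB  - UTLB replace boundary
 *   bits 26-31 LRUI - ITLB least-recently-used bits
 * The read handler above reassembles URC/URB/LRUI from the separately
 * maintained shadow variables; the write handler below splits them out again.
 */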

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the translation cache as all
            // bets are off now. We also need to force an immediate exit from
            // the current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            /* Select the cache-RAM half on external address bit 25, which is
             * bit (25-LXDREAM_PAGE_BITS) of the page index i */
            page_map[i] = cache + (((i >> (25-LXDREAM_PAGE_BITS)) & 1) << LXDREAM_PAGE_BITS);
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}
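
/* Worked example (added for illustration, assuming LXDREAM_PAGE_BITS == 12,
 * i.e. 4K pages): the on-chip cache RAM is an 8K buffer shadowed across the
 * 0x1C000000-0x1FFFFFFF region. With OIX=0 the buffer half is selected by
 * page-index bit 1, i.e. address bit 13 ((i&0x02)<<11 yields offset 0 or
 * 4096); with OIX=1 it is selected by external address bit 25, i.e. bit 13
 * of the page index (again yielding offset 0 or 4096).
 */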

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
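
/* Register fields consumed by LDTLB above, per the SH7750 manual (comment
 * added for reference):
 *   PTEH: bits 10-31 VPN, bits 0-7 ASID
 *   PTEL: bits 10-28 PPN, bits 0-8 the V/SZ/PR/C/D/SH/WT flag bits that
 *         mmu_utlb[].flags stores directly (TLB_VALID, TLB_SIZE_*, etc.)
 *   PTEA: bits 0-3 the PCMCIA SA/TC attributes (kept but unused here)
 */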

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup, but takes the ASID as an argument and does not advance the
 * replacement counter.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup, but takes the ASID as an argument and does not fall back to the
 * UTLB or update the LRUI bits.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
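
/* Note (added for reference): on the SH7750 the URC field is the UTLB
 * replace counter - it advances on every UTLB access and wraps either at the
 * URB boundary (when URB is non-zero) or at 64, which is what the increment
 * at the top of this function models. LDTLB then loads into entry URC.
 */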

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
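/* The LRUI field is the SH7750's 6-bit pseudo-LRU encoding for the 4-entry
 * ITLB: each bit records the relative age of one pair of entries. The bit
 * tests below follow the manual's replacement conditions (e.g. LRUI & 0x38
 * == 0x38 means entry 0 is least recently used), and each assignment marks
 * the replaced entry as most recently used. (Comment added for reference.)
 */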
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual itlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}
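
/* Worked example (added for illustration): a hit on a 4K entry with
 * ppn = 0x0C012000 and mask = MASK_4K translates vma 0x00012345 (matching
 * vpn 0x00012000) to (0x0C012000 & 0xFFFFF000) | (0x00012345 & 0x00000FFF)
 * == 0x0C012345 - the page frame from the entry plus the in-page offset.
 */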

sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
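
/* The physical layout assumed above matches the Dreamcast memory map: main
 * RAM lives in the 0x0C000000 region (mirrored through 0x0FFFFFFF, hence the
 * 0xFF000000 mask) and the 2MB BIOS ROM sits at physical 0 (hence the
 * 0xFFE00000 mask). Anything else invalidates the icache as unsupported.
 * (Comment added for reference.)
 */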

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method raises TLB exceptions normally (hence it should
 * only be used immediately prior to execution of code), and on success sets
 * the icache according to the matching TLB entry.
 * If AT is off, this method sets the icache to cover the entire referenced
 * RAM/ROM region.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate an address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}

gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                    (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
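
/* Worked example (added for illustration): a PREF to vma 0xE0000020 selects
 * SQ1 - address bit 5 is set, so queue == 8, which picks the second block of
 * eight slots (assuming the two queues are stored as sixteen consecutive
 * uint32 values in sh4r.store_queue). With MMUCR.AT == 0 and QACR1 == 0x14,
 * the 32-byte burst lands at ((0x14 & 0x1C) << 24) | (0xE0000020 & 0x03FFFFE0)
 * == 0x14000020.
 */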