lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    818:2e08d8237d33
prev         817:e9d2d9be7cb6
next         819:ef4fec10a63a
author       nkeynes
date         Tue Aug 19 13:00:46 2008 +0000
permissions  -rw-r--r--
last change  Add semi-documented PVR register at 0xFF000030 (SH4 version identification)
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"
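
/* Strip the top three address bits to convert a P1/P2 virtual address to
 * its 29-bit external (physical) address. */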
#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
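
/* Software copies of the SH4's 4-entry instruction TLB and 64-entry unified
 * TLB. The layouts below mirror the hardware entry fields rather than the
 * packed register encodings used by the address/data array accesses. */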
struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
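
/**
 * Return the address mask implied by the size bits of a TLB entry's flags
 * (the two TLB_SIZE_MASK bits select 1K, 4K, 64K or 1M pages).
 */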
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
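
/**
 * MMU register read handler. MMUCR is special-cased because the URC, URB
 * and LRUI fields (bits 10-15, 18-23 and 26-31 respectively) are kept in
 * separate variables for speed and must be recomposed on read.
 */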
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
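
/**
 * MMU register write handler. Masks each register down to its writable
 * bits, tracks ASID changes (which invalidate the current icache mapping),
 * and flushes the translation cache when MMUCR.AT changes state.
 */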
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
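
/**
 * Allocate the two host pages that back the on-chip operand-cache RAM
 * (mapped into the 0x1C000000 region when OC RAM mode is enabled).
 */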
void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}
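
/**
 * Map or unmap the operand-cache RAM region (0x1C000000-0x1FFFFFFF).
 * With OIX=0 the SH4 selects between the two OC RAM pages by address
 * bit 13; with OIX=1 it uses address bit 25 instead.
 */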
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            /* i is a page index, so address bit 25 is page-index bit
             * (25-LXDREAM_PAGE_BITS); testing bit 25 of i itself can
             * never be non-zero within this region */
            page_map[i] = cache + (((i>>(25-LXDREAM_PAGE_BITS))&0x01)<<LXDREAM_PAGE_BITS);
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}
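
/* ITLB address/data array accesses select one of the four entries via
 * bits 8:7 of the access address. */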
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}
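
/* UTLB array decode: address bits 13:8 select the entry, bit 7 requests an
 * associative write, and bit 23 selects data array 2 (the PCMCIA bits). */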
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translation functions below are excessively complicated, but
 * unfortunately it's a complicated system. TODO: make this not be
 * painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
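
/* The LRUI tests in the replacement logic below follow the SH7750's
 * documented pseudo-LRU scheme: each of the six LRUI bits records the
 * relative age of one pair of ITLB entries, so a fixed mask/value test
 * per entry identifies the least-recently-used one. */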
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
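
/**
 * Translate a virtual address for a data read. Handles the fixed P1/P2/P4
 * mappings and conditional store-queue access directly, and consults the
 * UTLB (with or without ASID matching, depending on MMUCR.SV and the
 * privilege level) when address translation is enabled. Raises the
 * appropriate exception and returns MMU_VMA_ERROR on failure.
 */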
sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
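
/**
 * Translate a virtual address for a data write. Identical to the read path
 * except for the exception codes, the writability check, and the initial
 * page write (dirty-bit) check.
 */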
sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally (hence it
 * should only be used immediately prior to executing code), and will set
 * the icache according to the matching TLB entry. If AT is off, it will
 * map the entire referenced RAM/ROM region into the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate an address for disassembly purposes (i.e. performs an
 * instruction lookup) - does not raise exceptions or modify any state, and
 * ignores protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
                (vma & (~mmu_itlb[entryNo].mask));
    }
}
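
/**
 * Flush one 32-byte store queue to memory. Address bit 5 selects SQ0 or
 * SQ1. With address translation enabled the target comes from the UTLB
 * (subject to the usual write-protection and dirty-bit checks); otherwise
 * the external address is composed from bits 25:5 of addr with bits 4:2
 * of the matching QACR register supplying bits 28:26.
 */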
gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                    (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}