filename src/sh4/mmu.c
changeset 817:e9d2d9be7cb6
prev 810:833cc4960556
next 818:2e08d8237d33
author nkeynes
date Tue Aug 19 08:38:10 2008 +0000
permissions -rw-r--r--
last change Fix CCR register mask
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
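/* Illustrative example: the external-address mask simply drops the top
 * three bits of the VMA, so the P1/P2 mirrors collapse onto the same
 * external address:
 *   VMA_TO_EXT_ADDR(0x8C001000) == 0x0C001000   (P1 -> main RAM)
 *   VMA_TO_EXT_ADDR(0xAC001000) == 0x0C001000   (P2 -> same physical page)
 */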
/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
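/* Worked example: the two size bits (0x80 and 0x10) select the page mask.
 * For a 64KB page only the top 16 bits of the VPN take part in a match and
 * the low 16 bits of the address pass through untranslated:
 *   get_mask_for_flags(TLB_VALID|TLB_SIZE_64K) == MASK_64K (0xFFFF0000)
 */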
struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
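/* Example: the URC/URB/LRUI counters are kept outside the stored register
 * value and folded back in on read. With URC=5, URB=0x20 and LRUI=0, a
 * guest read of MMUCR sees (5<<10)|(0x20<<18) == 0x00801400 OR'd onto
 * whatever AT/SV/TI/SQMD bits were last written.
 */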
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}
int MMU_load_state( FILE *f )
{
    /* Set up the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 - select on address bit 25, i.e. bit (25-PAGE_BITS) of the page number */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + (((i>>(25-LXDREAM_PAGE_BITS))&0x01)<<LXDREAM_PAGE_BITS);
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}
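/* Index-mode illustration (assuming LXDREAM_PAGE_BITS == 12): under OIX=0
 * the pages at external addresses 0x1C001000 and 0x1C003000 differ in
 * address bit 13 (bit 1 of the page number) and so map to opposite 4K
 * halves of the 8KB operand-cache RAM; under OIX=1 the deciding bit is
 * address bit 25 instead, e.g. 0x1C000000 vs 0x1E000000.
 */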
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
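/* Worked example: suppose the guest sets up a 4K page mapping and then
 * executes LDTLB:
 *   PTEH = 0x10400005 -> vpn = 0x10400000, asid = 0x05
 *   PTEL = 0x0C45611C -> ppn = 0x0C456000,
 *                        flags = 0x11C (VALID|SIZE_4K|CACHEABLE|DIRTY)
 * get_mask_for_flags(0x11C) then yields MASK_4K (0xFFFFF000).
 */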
static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - same matching rules as
 * the normal lookup, but does not update the URC counter.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}
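/* Match-test illustration: the XOR/mask idiom above treats two addresses
 * as matching when they agree on every bit the page mask covers. E.g. for
 * a 4K entry with vpn = 0x10400000:
 *   (0x10400FFC ^ 0x10400000) & MASK_4K == 0      -> hit
 *   (0x10401000 ^ 0x10400000) & MASK_4K == 0x1000 -> miss
 */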
/**
 * Find an ITLB entry for the associative TLB write - same matching rules as
 * the normal lookup, but does not update the LRU state or fall back to the
 * UTLB.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}
void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}
/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}
/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */
/**
 * Perform the actual UTLB lookup with ASID matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a TLB data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Perform the actual UTLB lookup matching on VPN only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a TLB data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
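/* LRUI illustration: the three tests above decode the 6-bit LRUI field of
 * MMUCR. For instance LRUI = 0x38 (binary 111000) satisfies
 * (lrui & 0x38) == 0x38, so entry 0 is taken as least recently used and
 * replaced; the update then clears those bits, marking entry 0 as the
 * most recently used again.
 */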
/**
 * Perform the actual ITLB lookup with ASID matching.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a TLB miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * On an ITLB miss, falls back to the UTLB and refills the ITLB from it.
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual ITLB lookup matching on VPN only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a TLB miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * On an ITLB miss, falls back to the UTLB and refills the ITLB from it.
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
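/* Address-generation example: with a matching 4K entry ppn = 0x0C456000,
 * mask = 0xFFFFF000, a read of vma 0x10400ABC yields
 *   pma = (0x0C456000 & 0xFFFFF000) | (0x10400ABC & 0x00000FFF)
 *       = 0x0C456ABC
 * which falls below 0x1C000000 and so is returned without the P4 remap.
 */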
sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
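/* Mapping example: for a fetch from P1 at 0x8C010000 the main-RAM branch
 * above caches page_vma = 0x8C000000 with a 16MB mask, so any subsequent
 * fetch in 0x8C000000..0x8CFFFFFF hits sh4_main_ram directly without
 * re-translation.
 */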
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * is invalidated instead.
 * If AT is on, this method raises TLB exceptions normally (hence it should
 * only be called immediately prior to executing code) and sets the icache
 * according to the matching TLB entry.
 * If AT is off, this method sets the icache to cover the entire referenced
 * RAM/ROM region.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions and ignores protection bits, though
 * the underlying lookups may update the TLB replacement state. Returns the
 * translated address, or MMU_VMA_ERROR on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
                (vma & (~mmu_itlb[entryNo].mask));
    }
}
gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                    (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
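/* Store-queue example: with address translation off, a PREF to 0xE0000020
 * selects SQ1 ((addr&0x20)>>2 == 8, the second block of eight longwords in
 * sh4r.store_queue), and with QACR1 = 0x10 the 32-byte burst target is
 *   ((0x10 & 0x1C) << 24) | (0xE0000020 & 0x03FFFFE0) == 0x10000020.
 */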