filename    src/sh4/mmu.c
changeset   819:ef4fec10a63a
prev        818:2e08d8237d33
next        826:69f2c9f1e608
author      nkeynes
date        Tue Aug 19 22:58:05 2008 +0000
permissions -rw-r--r--
last change Add stubs for the (undocumented) SH4 performance counter registers
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
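
/* The VMA_TO_EXT_ADDR macro above masks off the top three address bits,
 * folding a P1/P2 virtual address down onto the SH4's 29-bit external
 * (physical) address space: for example both 0x8C001000 (P1) and
 * 0xAC001000 (P2) resolve to physical 0x0C001000 in main RAM.
 */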

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
} while(0)

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb( void );

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
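
/* MMUCR field layout, as implied by the shift/mask arithmetic below (and
 * matching the SH7750 hardware manual): AT (translation enable) is bit 0,
 * TI (TLB invalidate, write-only) bit 2, SV (single virtual mode) bit 8,
 * SQMD (store queue mode) bit 9, URC bits 10-15, URB bits 18-23 and LRUI
 * bits 26-31. Only AT/SV/SQMD (mask 0x00000301) are kept in the backing
 * MMIO register; URC, URB and LRUI live in the static variables above and
 * are merged back in on read.
 */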

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    case PMCR1:
    case PMCR2:
        if( val != 0 ) {
            WARN( "Performance counters not implemented" );
        }
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            /* select cache half by physical address bit 25 (i is a page index,
             * so the address has to be reconstructed before masking) */
            page_map[i] = cache + (((i<<LXDREAM_PAGE_BITS)&0x02000000)>>(25-LXDREAM_PAGE_BITS));
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}
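
/* A sketch of what the two index modes select, assuming the usual
 * LXDREAM_PAGE_BITS of 12 (4K pages): the 8K of operand-cache RAM is split
 * into two 4K halves. With OIX=0 a page is assigned to a half by address
 * bit 13, so consecutive 4K pages alternate between halves; with OIX=1 the
 * half is chosen by address bit 25, splitting the 0x1C000000-0x1FFFFFFF
 * region into a low and a high range.
 */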

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
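
/* Register layouts used by LDTLB, as implied by the masks above: PTEH holds
 * the VPN in bits 31-10 and the ASID in bits 7-0; PTEL holds the PPN in
 * bits 28-10 and the V/SZ/PR/C/D/SH/WT flag bits in bits 8-0; PTEA carries
 * the 4-bit PCMCIA space/timing data, which the emulator stores but never
 * uses.
 */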

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
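
/* UTLB region address encoding, as implied by the macros above: bits 13-8
 * of the MMIO address select one of the 64 entries, bit 7 requests an
 * associative (content-matched) write, and bit 23 selects the secondary
 * data array (the PCMCIA assistance data) instead of PPN+flags.
 */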

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
           ((ent->flags & TLB_DIRTY)<<7);
}

int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup, but does not advance the URC replacement counter.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup, but does not update the LRU state or fall back to the UTLB.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */
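
/* A worked example of the translation performed below: given a UTLB entry
 * with vpn=0x00400000, ppn=0x0C400000 and a 4K page (mask 0xFFFFF000), the
 * virtual address 0x00400A5C satisfies ((vpn^addr)&mask)==0 and resolves to
 * (0x0C400000 & 0xFFFFF000) | (0x00400A5C & 0x00000FFF) = 0x0C400A5C.
 * (Illustrative values only.)
 */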

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
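
/* Note that both UTLB lookups above advance MMUCR.URC on every access,
 * mirroring the hardware's round-robin replacement pointer: URC wraps to 0
 * on reaching URB (or the entry count, 0x40), so successive LDTLBs cycle
 * through the non-reserved UTLB entries.
 */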

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
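
/* The LRUI arithmetic above follows the SH7750's 6-bit pseudo-LRU scheme:
 * each bit records, for one of the six pairs of ITLB entries, which member
 * of the pair was used more recently. An entry is least-recently used when
 * all three bits involving it point the other way (e.g. entry 0 is LRU
 * when bits 5-3 are all set, mask 0x38), and touching an entry rewrites
 * its three pairwise bits to mark it most recent - the same masks appear
 * in the lookup functions below.
 */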

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}

sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method raises TLB exceptions normally (hence it should
 * only be used immediately prior to executing code), and otherwise sets the
 * icache according to the matching TLB entry. If AT is off, it sets the
 * icache to cover the entire referenced RAM/ROM region.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate an address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
               (vma & (~mmu_itlb[entryNo].mask));
    }
}

gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                    (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
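
/* Sketch of the address arithmetic above: bit 5 of the PREF address picks
 * the queue, so (addr&0x20)>>2 yields a word offset of 0 (SQ0) or 8 (SQ1)
 * into sh4r.store_queue. With address translation off, the external address
 * is formed from QACR0/1 bits 4-2 (shifted up to bits 28-26) plus address
 * bits 25-5; e.g. a PREF at 0xE0000040 with QACR0 = 0x10 flushes SQ0 to
 * physical 0x10000040.
 */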

/********************************* PMM *************************************/

/**
 * Side note - this is here (rather than in sh4mmio.c) as the control registers
 * are part of the MMU block, and it seems simplest to keep it all together.
 */
int32_t mmio_region_PMM_read( uint32_t reg )
{
    return MMIO_READ( PMM, reg );
}

void mmio_region_PMM_write( uint32_t reg, uint32_t val )
{
    /* Read-only */
}