lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 915:c989eb4c22d8
prev 911:2f6ba75b84d1
next 927:17b6b9e245d8
author nkeynes
date Thu Dec 11 23:26:03 2008 +0000
permissions -rw-r--r--
last change Disable the generational translation cache - I've got no evidence that it
actually helps performance, and it simplifies things to get rid of it (in
particular, translated code doesn't have to worry about being moved now).
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
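
/* Background note: the SH4's external address space is 29 bits wide, so
 * P0/U0 (0x00000000-0x7FFFFFFF), P1 (0x80000000-0x9FFFFFFF) and P2
 * (0xA0000000-0xBFFFFFFF) addresses all map onto it by dropping the top
 * three bits, which is all this macro does. For example, 0x8C001000 (P1)
 * and 0xAC001000 (P2) both resolve to external address 0x0C001000.
 */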

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

struct utlb_sort_entry {
    sh4addr_t key; // Masked VPN + ASID
    uint32_t mask; // Mask + 0x00FF
    int entryNo;
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static void mmu_utlb_sorted_reset();
static void mmu_utlb_sorted_reload();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
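
/* MMUCR is partially shadowed: URC (bits 10-15), URB (bits 18-23) and LRUI
 * (bits 26-31) are kept in the mmu_urc/mmu_urb/mmu_lrui variables above and
 * are merged back into the register value whenever MMUCR is read, as below.
 */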

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
            // The AT or SV flag has changed state - flush the translation
            // cache as all bets are off now. We also need to force an
            // immediate exit from the current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
    mmu_utlb_sorted_reload();
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    mmu_utlb_sorted_reload();
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02000000)>>(25-LXDREAM_PAGE_BITS));
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}
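
/* On the SH4, the OIX bit selects which address bit picks the operand-cache
 * RAM bank: address bit 13 when OIX=0, address bit 25 when OIX=1 - hence the
 * two different page_map computations above.
 */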

/******************* Sorted TLB data structure ****************/
/*
 * mmu_utlb_sorted maintains a list of all active (valid) entries,
 * sorted by masked VPN and then ASID. Multi-hit entries are resolved
 * ahead of time, and have -2 recorded as the corresponding entry number.
 *
 * FIXME: Multi-hit detection doesn't pick up cases where two pages
 * overlap due to different sizes (and don't share the same base
 * address).
 */
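
/* Illustrative example: a valid 4K entry with VPN 0x10002000 and ASID 5 is
 * stored with key = (0x10002000 & MASK_4K) + 5 = 0x10002005 and
 * mask = MASK_4K | 0xFF, so a lookup value (vma & 0xFFFFFC00) + asid matches
 * only when both the masked VPN and the ASID byte agree.
 */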

static void mmu_utlb_sorted_reset()
{
    mmu_utlb_entries = 0;
}

/**
 * Find an entry in the sorted table (VPN+ASID check).
 */
static inline int mmu_utlb_sorted_find( sh4addr_t vma )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    while( low != high ) {
        int posn = (high+low)>>1;
        int masked = lookup & mmu_utlb_sorted[posn].mask;
        if( mmu_utlb_sorted[posn].key < masked ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > masked ) {
            high = posn;
        } else {
            return mmu_utlb_sorted[posn].entryNo;
        }
    }
    return -1;
}
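
/* The URC increment above mirrors the hardware's replacement counter: per
 * the SH7750 documentation, URC advances on UTLB access and wraps to 0 on
 * reaching URB (or the top of the 64-entry UTLB).
 */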

static void mmu_utlb_insert_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;

    assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
    /* Find the insertion point */
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            /* Exact match - multi-hit */
            mmu_utlb_sorted[posn].entryNo = -2;
            return;
        }
    }
    memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
             (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
    mmu_utlb_sorted[low].key = key;
    mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
    mmu_utlb_sorted[low].entryNo = entry;
    mmu_utlb_entries++;
}

static void mmu_utlb_remove_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            if( mmu_utlb_sorted[posn].entryNo == -2 ) {
                /* Multiple-entry recorded - rebuild the whole table minus entry */
                int i;
                mmu_utlb_entries = 0;
                for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
                    if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
                        mmu_utlb_insert_entry(i);
                    }
                }
            } else {
                mmu_utlb_entries--;
                memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
                         (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
            }
            return;
        }
    }
    assert( 0 && "UTLB key not found!" );
}

static void mmu_utlb_sorted_reload()
{
    int i;
    mmu_utlb_entries = 0;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
        mmu_utlb[mmu_urc].ppn |= 0xE0000000;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
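
/* Worked example (illustrative values): with PTEH = 0x10002005 and
 * PTEL = 0x0C000114, LDTLB loads UTLB[URC] with VPN 0x10002000, ASID 5,
 * PPN 0x0C000000 and flags TLB_VALID|TLB_SIZE_4K|TLB_DIRTY; the 0xE0000000
 * remap above applies only to PPNs in the 0x1C000000+ region.
 */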

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
    mmu_utlb_entries = 0;
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
                mmu_utlb_remove_entry( utlb );
            } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
                mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
        if( ent->ppn >= 0x1C000000 )
            ent->ppn |= 0xE0000000;
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
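
/* The bit patterns above implement the SH7750's pseudo-LRU scheme: the six
 * LRUI bits record the pairwise access order of the four ITLB entries, so
 * each test picks out the least-recently-used entry and each update marks
 * the selected entry as most recently used.
 */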

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_sorted_find( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
    }
}
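
/* Illustrative example of the final address generation: for a 1M-page entry
 * with vpn = 0x04300000, mask = MASK_1M and ppn = 0x0C300000, a read from
 * 0x0430ABCD translates to
 * (0x0C300000 & 0xFFF00000) | (0x0430ABCD & 0x000FFFFF) = 0x0C30ABCD.
 */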

sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        return pma;
    }
}

/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
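
/* The Dreamcast's 16MB of main RAM sits at external address 0x0C000000, so
 * the 0xFF000000 mask lets a single icache entry cover one whole 16MB view
 * of it (e.g. 0x8C000000 in P1 or 0xAC000000 in P2) at a time.
 */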

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
                (vma & (~mmu_itlb[entryNo].mask));
    }
}

void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;
    mem_copy_to_sh4( target, src, 32 );
}
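
/* A store-queue flush writes one 32-byte block: bit 5 of the address selects
 * SQ0/SQ1, bits 5-25 supply the low target bits, and the QACR register
 * supplies the top byte. Illustrative example: a prefetch on 0xE2001234 with
 * QACR1 = 0x0C flushes SQ1 to external address 0x0E001220.
 */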

gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */

    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }
    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return FALSE;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return FALSE;
        }

        /* finally generate the target address */
        target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
    }

    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}