lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    929:fd8cb0c82f5f
prev         927:17b6b9e245d8
next         931:430048ea8b71
author       nkeynes
date         Mon Dec 22 09:51:11 2008 +0000 (15 years ago)
branch       lxdream-mem
permissions  -rw-r--r--
last change  Remove pointer cache and add full address-space map. Much better
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#ifdef HAVE_FRAME_ADDRESS
#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
#define RETURN_VIA(exc) return MMU_VMA_ERROR
#endif

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
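
/* Illustrative only: a caller is expected to treat MMU_VMA_ERROR as "an
 * exception has already been raised" and unwind at once, along the lines
 * of (hypothetical read helper, not part of this file):
 *
 *     sh4addr_t pa = mmu_vma_to_phys_read( vma );
 *     if( pa == MMU_VMA_ERROR )
 *         return;  // exception state already set by the MMU
 *     val = mem_read_long( pa );
 *
 * With HAVE_FRAME_ADDRESS the same contract is met instead by RETURN_VIA
 * overwriting the caller's return address with the supplied exception
 * continuation, so translated code pays no test on the fast path.
 */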
#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or by alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
} while(0)

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
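
/* Note that the page-size field is split across bits 4 and 7 of the flag
 * word (hence TLB_SIZE_MASK == 0x90), so the size cannot be extracted with
 * a single shift; get_mask_for_flags() below maps each of the four legal
 * encodings to its page mask, e.g. TLB_SIZE_4K (0x10) -> MASK_4K
 * (0xFFFFF000).
 */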

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

struct utlb_sort_entry {
    sh4addr_t key; // Masked VPN + ASID
    uint32_t mask; // Mask + 0x00FF
    int entryNo;
};
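
/* The sort key packs a page's masked VPN together with its ASID: the
 * smallest page size is 1K, so VPN bits 9..0 are always clear and the low
 * byte is free to hold the 8-bit ASID (see mmu_utlb_insert_entry). For
 * example, a 4K page at VPN 0x8C001000 with ASID 3 gets key 0x8C001003
 * and lookup mask MASK_4K|0xFF == 0xFFFFF0FF.
 */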

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static void mmu_utlb_sorted_reset();
static void mmu_utlb_sorted_reload();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}

MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
    mmu_utlb_sorted_reload();
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    mmu_utlb_sorted_reload();
    return 0;
}
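
/* Map the on-chip operand-cache RAM (OC index modes) into the flat page
 * table at 0x1C000000-0x1FFFFFFF. The 8K cache allocated in MMU_init is
 * presented as two 4K halves; which effective-address bit selects the half
 * depends on CCR.OIX - bit 13 when OIX=0, bit 25 when OIX=1 (assuming 4K
 * host pages) - and the region is unmapped entirely when OC RAM mode is
 * off.
 */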
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            /* i is a page number, so shift it back up to an address before
             * testing address bit 25 */
            page_map[i] = cache + (((i<<LXDREAM_PAGE_BITS)&0x02000000)>>(25-LXDREAM_PAGE_BITS));
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}

/******************* Sorted TLB data structure ****************/
/*
 * mmu_utlb_sorted maintains a list of all active (valid) entries,
 * sorted by masked VPN and then ASID. Multi-hit entries are resolved
 * ahead of time, and are flagged by recording -2 as the entry number.
 *
 * FIXME: Multi-hit detection doesn't pick up cases where two pages
 * overlap due to different sizes (and don't share the same base
 * address).
 */
static void mmu_utlb_sorted_reset()
{
    mmu_utlb_entries = 0;
}

/**
 * Find an entry in the sorted table (VPN+ASID check).
 */
static inline int mmu_utlb_sorted_find( sh4addr_t vma )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    while( low != high ) {
        int posn = (high+low)>>1;
        int masked = lookup & mmu_utlb_sorted[posn].mask;
        if( mmu_utlb_sorted[posn].key < masked ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > masked ) {
            high = posn;
        } else {
            return mmu_utlb_sorted[posn].entryNo;
        }
    }
    return -1;
}

static void mmu_utlb_insert_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;

    assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
    /* Find the insertion point */
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            /* Exact match - multi-hit */
            mmu_utlb_sorted[posn].entryNo = -2;
            return;
        }
    }
    memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
             (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
    mmu_utlb_sorted[low].key = key;
    mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
    mmu_utlb_sorted[low].entryNo = entry;
    mmu_utlb_entries++;
}

static void mmu_utlb_remove_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            if( mmu_utlb_sorted[posn].entryNo == -2 ) {
                /* Multiple-entry recorded - rebuild the whole table minus entry */
                int i;
                mmu_utlb_entries = 0;
                for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                    if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
                        mmu_utlb_insert_entry(i);
                    }
                }
            } else {
                mmu_utlb_entries--;
                memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
                         (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
            }
            return;
        }
    }
    assert( 0 && "UTLB key not found!" );
}

static void mmu_utlb_sorted_reload()
{
    int i;
    mmu_utlb_entries = 0;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

/* TLB maintenance */
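
/* The ITLB/UTLB address- and data-array handlers below back the SH4's
 * memory-mapped TLB region in the P4 area (ITLB arrays at 0xF2000000 and
 * 0xF3000000, UTLB arrays at 0xF6000000 and 0xF7000000 on the SH7750);
 * guest code accesses them with ordinary MOVs, which the core is assumed
 * to route here after decoding the P4 address.
 */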

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
        mmu_utlb[mmu_urc].ppn |= 0xE0000000;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
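
/* For reference, the expected guest-side flow is: a TLB miss loads PTEH
 * with the faulting VPN and current ASID, the OS handler stores the
 * matching PTEL (and PTEA where relevant), then issues LDTLB to commit
 * the entry to UTLB[URC].
 */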

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
    mmu_utlb_entries = 0;
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
           ((ent->flags & TLB_DIRTY)<<7);
}
int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
                mmu_utlb_remove_entry( utlb );
            } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
                mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
        if( ent->ppn >= 0x1C000000 )
            ent->ppn |= 0xE0000000;
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

/* Cache access - not implemented */

int32_t FASTCALL mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t FASTCALL mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t FASTCALL mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t FASTCALL mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void FASTCALL mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void FASTCALL mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void FASTCALL mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void FASTCALL mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
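
/* Note that each UTLB lookup above bumps URC, wrapping at URB (or at 0x40).
 * This appears to be a simplification of the hardware counter, which on the
 * real SH4 advances per memory access, but it is sufficient to spread
 * LDTLB replacements across entries.
 */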

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
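
/* The six LRUI bits implement the SH7750's pairwise pseudo-LRU: each bit
 * records which of a pair of ITLB entries was used more recently, and the
 * masks above ((lrui & 0x38) == 0x38 selects entry 0, and so on) follow the
 * replacement conditions given in the hardware manual. Touching an entry
 * therefore only rewrites the three bits that involve it, as in the lookup
 * functions below.
 */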

/**
 * Perform the actual itlb lookup w/ asid protection.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_sorted_find( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
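
/* Quick reference for the region checks in the translation functions below
 * (standard SH4 address-space layout):
 *   U0/P0 0x00000000-0x7FFFFFFF  translated when MMUCR.AT=1
 *   P1    0x80000000-0x9FFFFFFF  privileged, untranslated, cacheable
 *   P2    0xA0000000-0xBFFFFFFF  privileged, untranslated, uncached
 *   P3    0xC0000000-0xDFFFFFFF  privileged, translated
 *   P4    0xE0000000-0xFFFFFFFF  control space; the store queues at
 *         0xE0000000-0xE3FFFFFF are user-accessible when MMUCR.SQMD=0
 */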

#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        RETURN_VIA(exc);
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        RETURN_VIA(exc);
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
               (addr & (~mmu_utlb[entryNo].mask));
    }
}

#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        RETURN_VIA(exc);
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        RETURN_VIA(exc);
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        return pma;
    }
}

/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
               (vma & (~mmu_itlb[entryNo].mask));
    }
}
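
/* Store queue flushing. A write to the SQ region (0xE0000000-0xE3FFFFFF)
 * selects one of the two 32-byte queues via address bit 5; on PREF the
 * queue is burst out to external memory. With the MMU off, the high bits
 * of the target come from QACR0/QACR1 (register bits 4:2 become address
 * bits 28:26); with the MMU on, the SQ address is translated through the
 * UTLB like an ordinary write, as in sh4_flush_store_queue_mmu below.
 */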
void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;
    mem_copy_to_sh4( target, src, 32 );
}

gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */

    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }
    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return FALSE;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return FALSE;
        }

        /* finally generate the target address */
        target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
    }

    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}