Search
lxdream.org :: lxdream/src/sh4/mmu.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/mmu.c
changeset 927:17b6b9e245d8
prev915:c989eb4c22d8
next929:fd8cb0c82f5f
next953:f4a156508ad1
author nkeynes
date Mon Dec 15 10:44:56 2008 +0000 (13 years ago)
permissions -rw-r--r--
last change Add return-address-modifying exception return code to mmu TLB lookups (a little bit faster)
view annotate diff log raw
     1 /**
     2  * $Id$
     3  *
     4  * MMU implementation
     5  *
     6  * Copyright (c) 2005 Nathan Keynes.
     7  *
     8  * This program is free software; you can redistribute it and/or modify
     9  * it under the terms of the GNU General Public License as published by
    10  * the Free Software Foundation; either version 2 of the License, or
    11  * (at your option) any later version.
    12  *
    13  * This program is distributed in the hope that it will be useful,
    14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    16  * GNU General Public License for more details.
    17  */
    18 #define MODULE sh4_module
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"
#ifdef HAVE_FRAME_ADDRESS
/* Exception-return hack: overwrite this function's saved return address on
 * the stack so that, after raising an exception, control returns directly
 * to the caller's exception-handling path instead of the normal path.
 * NOTE(review): assumes the return address sits one word above
 * __builtin_frame_address(0) - ABI/platform dependent, confirm per target. */
#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
/* No frame-address support: report failure through the return value instead */
#define RETURN_VIA(exc) return MMU_VMA_ERROR
#endif

/* Strip the region (P0-P4) bits to obtain the 29-bit external address */
#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
    35 /* The MMU (practically unique in the system) is allowed to raise exceptions
    36  * directly, with a return code indicating that one was raised and the caller
    37  * had better behave appropriately.
    38  */
    39 #define RAISE_TLB_ERROR(code, vpn) \
    40     MMIO_WRITE(MMU, TEA, vpn); \
    41     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    42     sh4_raise_tlb_exception(code);
    44 #define RAISE_MEM_ERROR(code, vpn) \
    45     MMIO_WRITE(MMU, TEA, vpn); \
    46     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    47     sh4_raise_exception(code);
    49 #define RAISE_OTHER_ERROR(code) \
    50     sh4_raise_exception(code);
    51 /**
    52  * Abort with a non-MMU address error. Caused by user-mode code attempting
    53  * to access privileged regions, or alignment faults.
    54  */
    55 #define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
    56 #define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
    58 #define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
    59 #define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
    60 #define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
    61 #define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
    62 #define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
    63 #define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    64     MMIO_WRITE(MMU, TEA, vpn); \
    65     MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
/* On-chip operand-cache RAM region, expressed as page-map indices; remapped
 * by mmu_set_cache_mode() according to the CCR.OIX setting */
#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
/* Page size is encoded in two non-adjacent bits (0x80 and 0x10) */
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

/* VPN/PPN comparison masks for each of the four page sizes above */
#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
/* ITLB entry: instruction-address translation */
struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask; // Page-size mask (one of MASK_1K..MASK_1M)
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags; // TLB_* flag bits
};

/* UTLB entry: unified (data) address translation */
struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags; // TLB_* flag bits
    uint32_t pcmcia; // extra pcmcia data - not used
};

/* Element of the sorted UTLB shadow table used for binary-search lookup */
struct utlb_sort_entry {
    sh4addr_t key; // Masked VPN + ASID
    uint32_t mask; // Mask + 0x00FF
    int entryNo;   // Index into mmu_utlb, or -2 for a pre-detected multi-hit
};
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;  // MMUCR.URC - UTLB replacement counter
static uint32_t mmu_urb;  // MMUCR.URB - wrap boundary for mmu_urc
static uint32_t mmu_lrui; // MMUCR.LRUI - ITLB least-recently-used bits
static uint32_t mmu_asid; // current asid

/* Sorted shadow of the valid UTLB entries (see "Sorted TLB" section below) */
static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted. 

/* Backing store for the operand-cache RAM region (2 pages, see MMU_init) */
static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static void mmu_utlb_sorted_reset();
static void mmu_utlb_sorted_reload(); 
   135 static uint32_t get_mask_for_flags( uint32_t flags )
   136 {
   137     switch( flags & TLB_SIZE_MASK ) {
   138     case TLB_SIZE_1K: return MASK_1K;
   139     case TLB_SIZE_4K: return MASK_4K;
   140     case TLB_SIZE_64K: return MASK_64K;
   141     case TLB_SIZE_1M: return MASK_1M;
   142     default: return 0; /* Unreachable */
   143     }
   144 }
   146 int32_t mmio_region_MMU_read( uint32_t reg )
   147 {
   148     switch( reg ) {
   149     case MMUCR:
   150         return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
   151     default:
   152         return MMIO_READ( MMU, reg );
   153     }
   154 }
/**
 * Handle a write to an MMU MMIO register: apply the per-register write
 * mask and any side effects, then commit the masked value via MMIO_WRITE.
 */
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case SH4VER:
        return; // version register is read-only; discard the write
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
    	val &= 0x000003FC;
    	break;
    case EXPEVT:
    case INTEVT:
    	val &= 0x00000FFF;
    	break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb(); // TI is a write-triggered "invalidate all"
        }
        // URC/URB/LRUI live in separate variables, not in the stored register
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    case MMUUNK1:
    	/* Note that if the high bit is set, this appears to reset the machine.
    	 * Not emulating this behaviour yet until we know why...
    	 */
    	val &= 0x00010007;
    	break;
    case QACR0:
    case QACR1:
    	val &= 0x0000001C;
    	break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
/**
 * One-time initialisation: allocate the 2 pages backing the operand-cache
 * RAM region. NOTE(review): the result of mem_alloc_pages is not checked
 * here - presumably it aborts internally on failure; confirm.
 */
void MMU_init()
{
    cache = mem_alloc_pages(2);
}

/**
 * Reset to power-on state. CCR/MMUCR are cleared through the normal write
 * path so their side effects (cache remap, TLB state) are applied, then
 * the sorted UTLB shadow table is rebuilt.
 */
void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
    mmu_utlb_sorted_reload();
}
/**
 * Serialize MMU state (cache RAM, both TLBs, URC/URB/LRUI/ASID) to f.
 * Field order must match MMU_load_state. NOTE(review): fwrite return
 * values are unchecked, so write errors are silently ignored here.
 */
void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}
   251 int MMU_load_state( FILE *f )
   252 {
   253     /* Setup the cache mode according to the saved register value
   254      * (mem_load runs before this point to load all MMIO data)
   255      */
   256     mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
   257     if( fread( cache, 4096, 2, f ) != 2 ) {
   258         return 1;
   259     }
   260     if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
   261         return 1;
   262     }
   263     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
   264         return 1;
   265     }
   266     if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
   267         return 1;
   268     }
   269     if( fread( &mmu_urc, sizeof(mmu_urb), 1, f ) != 1 ) {
   270         return 1;
   271     }
   272     if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
   273         return 1;
   274     }
   275     if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
   276         return 1;
   277     }
   278     mmu_utlb_sorted_reload();
   279     return 0;
   280 }
   282 void mmu_set_cache_mode( int mode )
   283 {
   284     uint32_t i;
   285     switch( mode ) {
   286     case MEM_OC_INDEX0: /* OIX=0 */
   287         for( i=OCRAM_START; i<OCRAM_END; i++ )
   288             page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
   289         break;
   290     case MEM_OC_INDEX1: /* OIX=1 */
   291         for( i=OCRAM_START; i<OCRAM_END; i++ )
   292             page_map[i] = cache + ((i&0x02000000)>>(25-LXDREAM_PAGE_BITS));
   293         break;
   294     default: /* disabled */
   295         for( i=OCRAM_START; i<OCRAM_END; i++ )
   296             page_map[i] = NULL;
   297         break;
   298     }
   299 }
/******************* Sorted TLB data structure ****************/
/*
 * mmu_utlb_sorted maintains a list of all active (valid) entries,
 * sorted by masked VPN and then ASID. Multi-hit entries are resolved 
 * ahead of time, and have -2 recorded as the corresponding entryNo
 * (see mmu_utlb_insert_entry).
 * 
 * FIXME: Multi-hit detection doesn't pick up cases where two pages 
 * overlap due to different sizes (and don't share the same base
 * address). 
 */ 

/* Empty the sorted table (no valid entries) */
static void mmu_utlb_sorted_reset() 
{
    mmu_utlb_entries = 0;
}
/**
 * Find an entry in the sorted table (VPN+ASID check).
 * Also advances the URC replacement counter as a side effect.
 * @return the matching UTLB entry number, -2 for a pre-recorded multi-hit
 *         slot, or -1 for no match.
 */
static inline int mmu_utlb_sorted_find( sh4addr_t vma )
{
    int low = 0;
    int high = mmu_utlb_entries;
    /* Search key: VPN at 1K-page granularity with the ASID in the low byte */
    uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;

    /* Advance the replacement counter, wrapping at URB (or at 0x40) */
    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    /* Binary search. The probe value is masked per-entry, so entries with
     * larger pages compare on correspondingly fewer VPN bits. */
    while( low != high ) {
        int posn = (high+low)>>1;
        int masked = lookup & mmu_utlb_sorted[posn].mask;
        if( mmu_utlb_sorted[posn].key < masked ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > masked ) {
            high = posn;
        } else {
            return mmu_utlb_sorted[posn].entryNo;
        }
    }
    return -1;
}
/**
 * Insert UTLB entry #entry into the sorted table, keeping it ordered by
 * key. If an entry with an identical key is already present, that slot is
 * instead marked as a multi-hit (entryNo = -2) and the table size is left
 * unchanged.
 */
static void mmu_utlb_insert_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    /* Sort key: masked VPN with the ASID occupying the low byte */
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;

    assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );

    /* Find the insertion point */
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            /* Exact match - multi-hit */
            mmu_utlb_sorted[posn].entryNo = -2;
            return;
        }
    }
    /* Shift the tail up one slot and place the new entry at 'low' */
    memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low], 
             (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
    mmu_utlb_sorted[low].key = key;
    /* Extend the mask over the ASID byte so lookups compare it as well */
    mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
    mmu_utlb_sorted[low].entryNo = entry;
    mmu_utlb_entries++;
}
/**
 * Remove UTLB entry #entry from the sorted table. If its slot had been
 * collapsed into a multi-hit marker, the whole table is rebuilt from the
 * remaining valid UTLB entries (we no longer know which entries collided).
 * Asserts if the entry's key is not present at all.
 */
static void mmu_utlb_remove_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            if( mmu_utlb_sorted[posn].entryNo == -2 ) {
                /* Multiple-entry recorded - rebuild the whole table minus entry */
                int i;
                mmu_utlb_entries = 0;
                for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
                    if( i != entry && (mmu_utlb[i].flags & TLB_VALID)  ) {
                        mmu_utlb_insert_entry(i);
                    }
                }
            } else {
                /* Plain entry: close the gap with a memmove */
                mmu_utlb_entries--;
                memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
                         (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
            }
            return;
        }
    }
    assert( 0 && "UTLB key not found!" );
}
   405 static void mmu_utlb_sorted_reload()
   406 {
   407     int i;
   408     mmu_utlb_entries = 0;
   409     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   410         if( mmu_utlb[i].flags & TLB_VALID ) 
   411             mmu_utlb_insert_entry( i );
   412     }
   413 }
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    /* Keep the sorted shadow table in sync: drop the old entry if it was valid */
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
    /* PPNs in the on-chip RAM region are remapped into P4 space */
    if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
        mmu_utlb[mmu_urc].ppn |= 0xE0000000;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
   437 static void mmu_invalidate_tlb()
   438 {
   439     int i;
   440     for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
   441         mmu_itlb[i].flags &= (~TLB_VALID);
   442     }
   443     for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
   444         mmu_utlb[i].flags &= (~TLB_VALID);
   445     }
   446     mmu_utlb_entries = 0;
   447 }
   449 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
   451 int32_t mmu_itlb_addr_read( sh4addr_t addr )
   452 {
   453     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   454     return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
   455 }
   456 int32_t mmu_itlb_data_read( sh4addr_t addr )
   457 {
   458     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   459     return (ent->ppn & 0x1FFFFC00) | ent->flags;
   460 }
   462 void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
   463 {
   464     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   465     ent->vpn = val & 0xFFFFFC00;
   466     ent->asid = val & 0x000000FF;
   467     ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
   468 }
   470 void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
   471 {
   472     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
   473     ent->ppn = val & 0x1FFFFC00;
   474     ent->flags = val & 0x00001DA;
   475     ent->mask = get_mask_for_flags(val);
   476     if( ent->ppn >= 0x1C000000 )
   477         ent->ppn |= 0xE0000000;
   478 }
   480 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
   481 #define UTLB_ASSOC(addr) (addr&0x80)
   482 #define UTLB_DATA2(addr) (addr&0x00800000)
   484 int32_t mmu_utlb_addr_read( sh4addr_t addr )
   485 {
   486     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   487     return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
   488     ((ent->flags & TLB_DIRTY)<<7);
   489 }
   490 int32_t mmu_utlb_data_read( sh4addr_t addr )
   491 {
   492     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   493     if( UTLB_DATA2(addr) ) {
   494         return ent->pcmcia;
   495     } else {
   496         return (ent->ppn&0x1FFFFC00) | ent->flags;
   497     }
   498 }
   500 /**
   501  * Find a UTLB entry for the associative TLB write - same as the normal
   502  * lookup but ignores the valid bit.
   503  */
   504 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   505 {
   506     int result = -1;
   507     unsigned int i;
   508     for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
   509         if( (mmu_utlb[i].flags & TLB_VALID) &&
   510                 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
   511                 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
   512             if( result != -1 ) {
   513                 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
   514                 return -2;
   515             }
   516             result = i;
   517         }
   518     }
   519     return result;
   520 }
   522 /**
   523  * Find a ITLB entry for the associative TLB write - same as the normal
   524  * lookup but ignores the valid bit.
   525  */
   526 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
   527 {
   528     int result = -1;
   529     unsigned int i;
   530     for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
   531         if( (mmu_itlb[i].flags & TLB_VALID) &&
   532                 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
   533                 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
   534             if( result != -1 ) {
   535                 return -2;
   536             }
   537             result = i;
   538         }
   539     }
   540     return result;
   541 }
/**
 * Write the UTLB address array. Associative writes search both the UTLB
 * and the ITLB for a matching entry and update its valid/dirty bits;
 * direct writes replace the addressed entry's VPN/ASID/valid/dirty fields.
 * A multi-hit in either lookup raises a multi-hit (reset) exception.
 */
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7); // D bit (bit 9) -> TLB_DIRTY (bit 2)
            /* Keep the sorted shadow table in sync with valid-bit transitions */
            if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
                mmu_utlb_remove_entry( utlb );
            } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
                mmu_utlb_insert_entry( utlb );
            }
        }

        /* Associative writes also update a matching ITLB entry's valid bit */
        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        /* Remove before mutating so the sorted table's key stays consistent */
        if( ent->flags & TLB_VALID ) 
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7); // D bit (bit 9) -> TLB_DIRTY (bit 2)
        if( ent->flags & TLB_VALID ) 
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
   584 void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
   585 {
   586     struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
   587     if( UTLB_DATA2(addr) ) {
   588         ent->pcmcia = val & 0x0000000F;
   589     } else {
   590         if( ent->flags & TLB_VALID ) 
   591             mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
   592         ent->ppn = (val & 0x1FFFFC00);
   593         ent->flags = (val & 0x000001FF);
   594         ent->mask = get_mask_for_flags(val);
   595         if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
   596             mmu_utlb[mmu_urc].ppn |= 0xE0000000;
   597         if( ent->flags & TLB_VALID ) 
   598             mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
   599     }
   600 }
/* Cache access - not implemented. These registers exist on real hardware
 * (cache address/data arrays in the P4 region); reads return 0 and writes
 * are discarded. */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}
   637 /******************************************************************************/
   638 /*                        MMU TLB address translation                         */
   639 /******************************************************************************/
   641 /**
   642  * The translations are excessively complicated, but unfortunately it's a
   643  * complicated system. TODO: make this not be painfully slow.
   644  */
/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    /* Advance the replacement counter, wrapping at URB (or at 0x40) */
    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    /* Linear scan: valid entries matching on ASID (unless shared) and masked VPN */
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Perform the actual utlb lookup matching on vpn only (used when
 * MMUCR.SV is set in privileged mode - ASID is ignored).
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    /* Advance the replacement counter, wrapping at URB (or at 0x40) */
    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static int inline mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui. The 6 LRUI bits encode the
     * relative use order of the 4 ITLB entries; each test below selects a
     * least-recently-used victim, then rewrites the bits to mark that entry
     * as most recently used (patterns per the SH4 MMUCR.LRUI definition -
     * see the hardware manual). */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    /* Copy the UTLB entry into the victim slot (ITLB keeps a subset of flags) */
    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
/**
 * Perform the actual itlb lookup w/ asid protection.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * On an ITLB miss the UTLB is consulted and, if it hits, the entry is
 * copied into the ITLB (replacing the LRU slot).
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    /* ITLB miss: refill from the UTLB (propagating its miss/multi-hit codes) */
    if( result == -1 ) {
        int utlbEntry = mmu_utlb_sorted_find( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    /* Hit: mark the matched entry as most recently used in LRUI */
    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual itlb lookup on vpn only (ASID ignored - used when
 * MMUCR.SV is set in privileged mode).
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * On an ITLB miss the UTLB is consulted and, if it hits, the entry is
 * copied into the ITLB (replacing the LRU slot).
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    /* ITLB miss: refill from the UTLB (propagating its miss/multi-hit codes) */
    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    /* Hit: mark the matched entry as most recently used in LRUI */
    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Translate a virtual address for a data read, raising the appropriate MMU
 * exception on failure. With HAVE_FRAME_ADDRESS the extra 'exc' argument is
 * used by RETURN_VIA to return straight into the caller's exception path;
 * otherwise RETURN_VIA ignores its argument and returns MMU_VMA_ERROR.
 */
#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
            /* else P3 (0xC0000000-0xDFFFFFFF): falls through to translation */
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            /* Any other privileged-region access from user mode: address error */
            MMU_READ_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr); /* MMU disabled - identity translation */
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr ); /* ASID-checked lookup */
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr ); /* SV mode: VPN-only lookup */
    }

    switch(entryNo) {
    case -1:
    MMU_TLB_READ_MISS_ERROR(addr);
    RETURN_VIA(exc);
    case -2:
    MMU_TLB_MULTI_HIT_ERROR(addr);
    RETURN_VIA(exc);
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
        	(addr & (~mmu_utlb[entryNo].mask));
    }
}
   886 #ifdef HAVE_FRAME_ADDRESS
   887 sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
   888 #else
   889 sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
   890 #endif
   891 {
   892     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
   893     if( addr & 0x80000000 ) {
   894         if( IS_SH4_PRIVMODE() ) {
   895             if( addr >= 0xE0000000 ) {
   896                 return addr; /* P4 - passthrough */
   897             } else if( addr < 0xC0000000 ) {
   898                 /* P1, P2 regions are pass-through (no translation) */
   899                 return VMA_TO_EXT_ADDR(addr);
   900             }
   901         } else {
   902             if( addr >= 0xE0000000 && addr < 0xE4000000 &&
   903                     ((mmucr&MMUCR_SQMD) == 0) ) {
   904                 /* Conditional user-mode access to the store-queue (no translation) */
   905                 return addr;
   906             }
   907             MMU_WRITE_ADDR_ERROR();
   908             RETURN_VIA(exc);
   909         }
   910     }
   912     if( (mmucr & MMUCR_AT) == 0 ) {
   913         return VMA_TO_EXT_ADDR(addr);
   914     }
   916     /* If we get this far, translation is required */
   917     int entryNo;
   918     if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
   919         entryNo = mmu_utlb_sorted_find( addr );
   920     } else {
   921         entryNo = mmu_utlb_lookup_vpn( addr );
   922     }
   924     switch(entryNo) {
   925     case -1:
   926     MMU_TLB_WRITE_MISS_ERROR(addr);
   927     RETURN_VIA(exc);
   928     case -2:
   929     MMU_TLB_MULTI_HIT_ERROR(addr);
   930     RETURN_VIA(exc);
   931     default:
   932         if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
   933                 : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
   934             /* protection violation */
   935             MMU_TLB_WRITE_PROT_ERROR(addr);
   936             RETURN_VIA(exc);
   937         }
   939         if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
   940             MMU_TLB_INITIAL_WRITE_ERROR(addr);
   941             RETURN_VIA(exc);
   942         }
   944         /* finally generate the target address */
   945         sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
   946         	(addr & (~mmu_utlb[entryNo].mask));
   947         return pma;
   948     }
   949 }
   951 /**
   952  * Update the icache for an untranslated address
   953  */
   954 static inline void mmu_update_icache_phys( sh4addr_t addr )
   955 {
   956     if( (addr & 0x1C000000) == 0x0C000000 ) {
   957         /* Main ram */
   958         sh4_icache.page_vma = addr & 0xFF000000;
   959         sh4_icache.page_ppa = 0x0C000000;
   960         sh4_icache.mask = 0xFF000000;
   961         sh4_icache.page = sh4_main_ram;
   962     } else if( (addr & 0x1FE00000) == 0 ) {
   963         /* BIOS ROM */
   964         sh4_icache.page_vma = addr & 0xFFE00000;
   965         sh4_icache.page_ppa = 0;
   966         sh4_icache.mask = 0xFFE00000;
   967         sh4_icache.page = mem_get_region(0);
   968     } else {
   969         /* not supported */
   970         sh4_icache.page_vma = -1;
   971     }
   972 }
   974 /**
   975  * Update the sh4_icache structure to describe the page(s) containing the
   976  * given vma. If the address does not reference a RAM/ROM region, the icache
   977  * will be invalidated instead.
   978  * If AT is on, this method will raise TLB exceptions normally
   979  * (hence this method should only be used immediately prior to execution of
   980  * code), and otherwise will set the icache according to the matching TLB entry.
   981  * If AT is off, this method will set the entire referenced RAM/ROM region in
   982  * the icache.
   983  * @return TRUE if the update completed (successfully or otherwise), FALSE
   984  * if an exception was raised.
   985  */
   986 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
   987 {
   988     int entryNo;
   989     if( IS_SH4_PRIVMODE()  ) {
   990         if( addr & 0x80000000 ) {
   991             if( addr < 0xC0000000 ) {
   992                 /* P1, P2 and P4 regions are pass-through (no translation) */
   993                 mmu_update_icache_phys(addr);
   994                 return TRUE;
   995             } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
   996                 MMU_READ_ADDR_ERROR();
   997                 return FALSE;
   998             }
   999         }
  1001         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1002         if( (mmucr & MMUCR_AT) == 0 ) {
  1003             mmu_update_icache_phys(addr);
  1004             return TRUE;
  1007         if( (mmucr & MMUCR_SV) == 0 )
  1008         	entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1009         else
  1010         	entryNo = mmu_itlb_lookup_vpn( addr );
  1011     } else {
  1012         if( addr & 0x80000000 ) {
  1013             MMU_READ_ADDR_ERROR();
  1014             return FALSE;
  1017         uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1018         if( (mmucr & MMUCR_AT) == 0 ) {
  1019             mmu_update_icache_phys(addr);
  1020             return TRUE;
  1023         entryNo = mmu_itlb_lookup_vpn_asid( addr );
  1025         if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
  1026             MMU_TLB_READ_PROT_ERROR(addr);
  1027             return FALSE;
  1031     switch(entryNo) {
  1032     case -1:
  1033     MMU_TLB_READ_MISS_ERROR(addr);
  1034     return FALSE;
  1035     case -2:
  1036     MMU_TLB_MULTI_HIT_ERROR(addr);
  1037     return FALSE;
  1038     default:
  1039         sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
  1040         sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
  1041         if( sh4_icache.page == NULL ) {
  1042             sh4_icache.page_vma = -1;
  1043         } else {
  1044             sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
  1045             sh4_icache.mask = mmu_itlb[entryNo].mask;
  1047         return TRUE;
  1051 /**
  1052  * Translate address for disassembly purposes (ie performs an instruction
  1053  * lookup) - does not raise exceptions or modify any state, and ignores
  1054  * protection bits. Returns the translated address, or MMU_VMA_ERROR
  1055  * on translation failure.
  1056  */
  1057 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
  1059     if( vma & 0x80000000 ) {
  1060         if( vma < 0xC0000000 ) {
  1061             /* P1, P2 and P4 regions are pass-through (no translation) */
  1062             return VMA_TO_EXT_ADDR(vma);
  1063         } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
  1064             /* Not translatable */
  1065             return MMU_VMA_ERROR;
  1069     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1070     if( (mmucr & MMUCR_AT) == 0 ) {
  1071         return VMA_TO_EXT_ADDR(vma);
  1074     int entryNo = mmu_itlb_lookup_vpn( vma );
  1075     if( entryNo == -2 ) {
  1076         entryNo = mmu_itlb_lookup_vpn_asid( vma );
  1078     if( entryNo < 0 ) {
  1079         return MMU_VMA_ERROR;
  1080     } else {
  1081         return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
  1082         (vma & (~mmu_itlb[entryNo].mask));
  1086 void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
  1088     int queue = (addr&0x20)>>2;
  1089     uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
  1090     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
  1091     sh4addr_t target = (addr&0x03FFFFE0) | hi;
  1092     mem_copy_to_sh4( target, src, 32 );
  1095 gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
  1097     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
  1098     int queue = (addr&0x20)>>2;
  1099     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
  1100     sh4addr_t target;
  1101     /* Store queue operation */
  1103     int entryNo;
  1104     if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
  1105     	entryNo = mmu_utlb_lookup_vpn_asid( addr );
  1106     } else {
  1107     	entryNo = mmu_utlb_lookup_vpn( addr );
  1109     switch(entryNo) {
  1110     case -1:
  1111     MMU_TLB_WRITE_MISS_ERROR(addr);
  1112     return FALSE;
  1113     case -2:
  1114     MMU_TLB_MULTI_HIT_ERROR(addr);
  1115     return FALSE;
  1116     default:
  1117     	if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
  1118     			: ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
  1119     		/* protection violation */
  1120     		MMU_TLB_WRITE_PROT_ERROR(addr);
  1121     		return FALSE;
  1124     	if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
  1125     		MMU_TLB_INITIAL_WRITE_ERROR(addr);
  1126     		return FALSE;
  1129     	/* finally generate the target address */
  1130     	target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
  1131     			(addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
  1134     mem_copy_to_sh4( target, src, 32 );
  1135     return TRUE;
.