lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    934:3acd3b3ee6d1
prev         933:880c37bb1909
next         939:6f2302afeb89
author       nkeynes
date         Sat Dec 27 03:14:59 2008 +0000
branch       lxdream-mem
permissions  -rw-r--r--
last change  Update sh4x86 to take advantage of SR assumptions. nice 2% there :)
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#include "mem.h"
#include "mmu.h"
#ifdef HAVE_FRAME_ADDRESS
#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
#define RETURN_VIA(exc) return MMU_VMA_ERROR
#endif
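
/* RETURN_VIA relies on __builtin_frame_address(0): on targets where the
 * saved return address sits one word above the frame pointer (e.g. x86),
 * storing 'exc' there makes the function "return" into the exception stub
 * supplied by the translated-code caller rather than to the call site.
 * Without frame-address support it degrades to returning the MMU_VMA_ERROR
 * sentinel, which every caller must then check explicitly.
 */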
/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static void mmu_utlb_sorted_reset();
static void mmu_utlb_sorted_reload();
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
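
/* The four cases correspond to the SH7750's supported page sizes (1KB, 4KB,
 * 64KB and 1MB, selected by the SZ bits of PTEL). Assuming the conventional
 * mask definitions in mmu.h (e.g. MASK_4K == 0xFFFFF000, MASK_1M ==
 * 0xFFF00000), the returned mask covers the VPN portion of an address for
 * that size, so the translation functions below can compose
 * phys = (ppn & mask) | (addr & ~mask).
 */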
MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as the asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
            // The AT or SV flag has changed state - flush the xlt cache as
            // all bets are off now. We also need to force an immediate exit
            // from the current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
void MMU_init()
{
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
    mmu_utlb_sorted_reload();
}
void MMU_save_state( FILE *f )
{
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}
int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    mmu_utlb_sorted_reload();
    return 0;
}
/******************* Sorted TLB data structure ****************/
/*
 * mmu_utlb_sorted maintains a list of all active (valid) entries,
 * sorted by masked VPN and then ASID. Multi-hit entries are resolved
 * ahead of time, and have -2 recorded as the corresponding entryNo.
 *
 * FIXME: Multi-hit detection doesn't pick up cases where two pages
 * overlap due to different sizes (and don't share the same base
 * address).
 */
static void mmu_utlb_sorted_reset()
{
    mmu_utlb_entries = 0;
}
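
/* Each sort entry carries key = (vpn & mask) + asid and a comparison mask of
 * (page mask | 0xFF), so the low byte of the key holds the ASID. For
 * example (illustrative values only): a 4K page at VPN 0x0C010000 with
 * ASID 3 sorts under key 0x0C010003 and is matched against
 * (vma & 0xFFFFFC00) + current_asid, masked with 0xFFFFF0FF.
 */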
/**
 * Find an entry in the sorted table (VPN+ASID check).
 */
static inline int mmu_utlb_sorted_find( sh4addr_t vma )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    while( low != high ) {
        int posn = (high+low)>>1;
        int masked = lookup & mmu_utlb_sorted[posn].mask;
        if( mmu_utlb_sorted[posn].key < masked ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > masked ) {
            high = posn;
        } else {
            return mmu_utlb_sorted[posn].entryNo;
        }
    }
    return -1;
}
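
/* The mmu_urc increment above models MMUCR.URC, the UTLB replacement
 * counter: it advances on each UTLB lookup and wraps to zero when it
 * reaches the URB bound (or the top of the 64-entry table). MMU_ldtlb
 * below uses the current URC value as the entry to replace.
 */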
static void mmu_utlb_insert_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;

    assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
    /* Find the insertion point */
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            /* Exact match - multi-hit */
            mmu_utlb_sorted[posn].entryNo = -2;
            return;
        }
    }
    memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
             (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
    mmu_utlb_sorted[low].key = key;
    mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
    mmu_utlb_sorted[low].entryNo = entry;
    mmu_utlb_entries++;
}
static void mmu_utlb_remove_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            if( mmu_utlb_sorted[posn].entryNo == -2 ) {
                /* Multiple-entry recorded - rebuild the whole table minus entry */
                int i;
                mmu_utlb_entries = 0;
                for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                    if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
                        mmu_utlb_insert_entry(i);
                    }
                }
            } else {
                mmu_utlb_entries--;
                memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
                         (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
            }
            return;
        }
    }
    assert( 0 && "UTLB key not found!" );
}
static void mmu_utlb_sorted_reload()
{
    int i;
    mmu_utlb_entries = 0;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
        mmu_utlb[mmu_urc].ppn |= 0xE0000000;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
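
/* For reference, a guest TLB-miss handler typically refills the TLB along
 * these lines (a sketch of the usual SH4 idiom, not code from any
 * particular guest - register choices are hypothetical):
 *
 *     ! PTEH already holds the faulting VPN/ASID (set by hardware)
 *     mov.l  @r0, r1      ! fetch the PTE from the page table
 *     mov.l  r1, @r2      ! store it to PTEL (r2 = 0xFF000004)
 *     ldtlb               ! copy PTEH/PTEL into UTLB[MMUCR.URC]
 *     rte
 *
 * which is why this function only has to snapshot the three registers.
 */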
static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
    mmu_utlb_entries = 0;
}
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
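
/* These helpers decode the P4-region memory-mapped TLB arrays: on the
 * SH7750 the ITLB address/data arrays live at 0xF2000000/0xF3000000 and
 * the UTLB arrays at 0xF6000000/0xF7000000, with the entry number in the
 * middle address bits, bit 7 selecting associative mode on address-array
 * writes, and bit 23 selecting data array 2 (the PCMCIA assistance bits).
 */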
int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
                mmu_utlb_remove_entry( utlb );
            } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
                mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
        if( ent->ppn >= 0x1C000000 )
            ent->ppn |= 0xE0000000;
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
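
/* The bit patterns above implement the LRUI scheme from the SH7750 hardware
 * manual: each of the six LRUI bits records the relative age of one pair of
 * the four ITLB entries, so a test like (LRUI & 0x38) == 0x38 means "entry 0
 * is older than entries 1, 2 and 3", and the update masks mark the refilled
 * entry as the most recently used of every pair it belongs to. The same
 * update masks reappear in the lookup functions below when an entry hits.
 */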
/**
 * Perform the actual itlb lookup w/ asid protection.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_sorted_find( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        RETURN_VIA(exc);
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        RETURN_VIA(exc);
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
    }
}
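
/* The final composition takes the physical page number from the matching
 * entry and the page offset from the virtual address. As a worked example
 * (values hypothetical): for a 1M page mapping VPN 0x7C000000 to PPN
 * 0x0C300000, a read of 0x7C0412A8 yields
 * (0x0C300000 & 0xFFF00000) | (0x7C0412A8 & 0x000FFFFF) == 0x0C3412A8.
 */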
#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        RETURN_VIA(exc);
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        RETURN_VIA(exc);
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        return pma;
    }
}
/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
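
/* The two recognized regions match the Dreamcast memory map: 16MB of main
 * RAM based at physical 0x0C000000 (hence the 0xFF000000 masks, which also
 * catch its mirrors in the P1/P2 segments) and the 2MB boot ROM at physical
 * 0 (mask 0xFFE00000). Anything else - VRAM, the various I/O areas - is not
 * directly executable here, so the icache is simply invalidated.
 */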
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence it should only be used immediately prior to execution of code),
 * and will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
                (vma & (~mmu_itlb[entryNo].mask));
    }
}
void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;
    ext_address_space[target>>12]->write_burst( target, src );
}
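
/* With the MMU disabled, the external address for a store-queue flush is
 * formed as the SH7750 manual describes: bits [25:5] come from the SQ
 * address itself and bits [28:26] from QACR0 or QACR1 (already masked to
 * 0x1C on write, hence the <<24 to place them). (addr&0x20)>>2 yields 0 or
 * 8, selecting one of the two 8-word queues within sh4r.store_queue.
 */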
gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */

    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }
    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return FALSE;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return FALSE;
        }

        /* finally generate the target address */
        target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
    }

    ext_address_space[target>>12]->write_burst( target, src );
    return TRUE;
}