lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    841:808d64b05073
prev         826:69f2c9f1e608
next         905:4c17ebd9ef5e
author       nkeynes
date         Wed Oct 29 23:36:31 2008 +0000
permissions  -rw-r--r--
last change  Enable the FIPR SSE3 code for now, and add a comment on the sh4r.fr alignment
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately. The multi-statement macros are wrapped in
 * do { } while(0) so that they expand safely as single statements.
 */
#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or by alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
} while(0)
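/* Worked example (editor's sketch, derived from the macros above): a TLB miss
 * on vpn = 0x8C0123F0, with PTEH previously holding ASID 0x42, leaves
 *
 *     TEA  = 0x8C0123F0                                  // full faulting address
 *     PTEH = (0x42 & 0x3FF) | (0x8C0123F0 & 0xFFFFFC00)
 *          = 0x8C012042                                  // VPN[31:10] | old ASID
 *
 * so the exception handler sees the faulting page number with the current
 * ASID preserved in the low bits.
 */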
#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
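/* Editor's note (a sketch, cross-checked against the SH7750 PTEL layout): the
 * TLB_* flags are simply the low nine bits of PTEL - V=bit8, SZ1=bit7,
 * PR1=bit6 (user-accessible), PR0=bit5 (writable), SZ0=bit4, C=bit3, D=bit2,
 * SH=bit1, WT=bit0 - which is why TLB_SIZE_MASK is the non-contiguous value
 * 0x90 (SZ1|SZ0). The MASK_* values keep the page-number bits for each size,
 * e.g. a 64K entry compares bits 31..16:
 *
 *     (0x8C010000 ^ 0x8C01FFFF) & MASK_64K == 0   // same 64K page
 *     (0x8C010000 ^ 0x8C020000) & MASK_64K != 0   // different page
 */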
struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb( void );

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
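/* Quick sanity sketch (editor's addition): TLB_SIZE_MASK covers only bits 7
 * and 4, so the four cases above are exhaustive and the default genuinely
 * cannot fire. For instance:
 *
 *     get_mask_for_flags(TLB_VALID|TLB_SIZE_4K) == MASK_4K  (0xFFFFF000)
 *     get_mask_for_flags(TLB_SIZE_1M|TLB_DIRTY) == MASK_1M  (0xFFF00000)
 */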
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
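/* Reassembly sketch for the MMUCR read above (editor's addition), matching
 * how the write handler below splits the register apart:
 *
 *     bits 31..26  LRUI  (mmu_lrui)
 *     bits 23..18  URB   (mmu_urb)
 *     bits 15..10  URC   (mmu_urc)
 *     bits  9,8,0  SQMD, SV, AT - kept in the MMIO backing store (mask 0x301)
 */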
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Set up the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 - address bit 13 selects the 4K half */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 - address bit 25 selects the 4K half */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + (((i>>(25-LXDREAM_PAGE_BITS))&1)<<LXDREAM_PAGE_BITS);
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}
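/* Editor's note on the OIX modes above (a sketch; assumes the usual 4K host
 * pages, i.e. LXDREAM_PAGE_BITS == 12): the 8KB of operand-cache RAM mapped
 * over 0x1C000000..0x1FFFFFFF is indexed by one effective-address bit -
 * bit 13 when OIX=0, bit 25 when OIX=1. Since i is a page number
 * (address >> LXDREAM_PAGE_BITS), those are page-number bits 1 and 13
 * respectively:
 *
 *     OIX=0: page 0x1C002 -> cache + 0x1000   (address 0x1C002000, bit 13 set)
 *     OIX=1: page 0x1E000 -> cache + 0x1000   (address 0x1E000000, bit 25 set)
 */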
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
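/* Worked example (editor's sketch) of the field split performed by LDTLB:
 * with PTEH = 0x8C012042 and PTEL = 0x0C01411D, the new UTLB entry gets
 *
 *     vpn   = 0x8C012000   (PTEH & 0xFFFFFC00)
 *     asid  = 0x42         (PTEH & 0xFF)
 *     ppn   = 0x0C014000   (PTEL & 0x1FFFFC00)
 *     flags = 0x11D        (V | SZ=4K | C | D | WT)
 *     mask  = MASK_4K
 */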
static void mmu_invalidate_tlb( void )
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
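/* Decode sketch (editor's addition) for the memory-mapped UTLB arrays these
 * macros service (mapped at 0xF6000000 in the P4 area): for an access such
 * as addr = 0xF6003F80,
 *
 *     UTLB_ENTRY(addr) = (addr>>8) & 0x3F   = 0x3F   // entry 63
 *     UTLB_ASSOC(addr) = addr & 0x80        = 0x80   // associative write
 *     UTLB_DATA2(addr) = addr & 0x00800000  = 0      // data array 1, not 2
 */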
int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - like the normal lookup,
 * but takes an explicit ASID and does not advance the URC counter.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}
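/* The match condition above is the core of every lookup in this file:
 * ((entry.vpn ^ vpn) & entry.mask) == 0 holds exactly when the two addresses
 * agree on the page-number bits for that entry's page size. Illustrative
 * values for a 1M entry with vpn = 0x8C100000:
 *
 *     (0x8C100000 ^ 0x8C1FFFFC) & MASK_1M == 0   // same 1M page
 *     (0x8C100000 ^ 0x8C200000) & MASK_1M != 0   // different page
 */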
/**
 * Find an ITLB entry for the associative TLB write - like the normal lookup,
 * but takes an explicit ASID and does not update the LRUI bits.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}
void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}
/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}
/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return the entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry number, or an error code.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
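/* Editor's gloss on the URC update above: URC is the replacement pointer
 * that LDTLB uses to pick its target entry. Each lookup bumps it, wrapping
 * either at URB (the software bound programmed via MMUCR) or at the hard
 * limit of 64 entries; e.g. with urb = 0x20 it cycles 0,1,...,0x1F,0,...
 * whether or not the lookup hits.
 */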
/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return the entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry number, or an error code.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine the entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
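/* Background for the LRUI tests above (editor's summary; this mirrors the
 * SH7750's documented pseudo-LRU scheme): LRUI is a 6-bit matrix in which
 * each bit records the relative age of one pair of the four ITLB entries.
 * A pattern such as (lrui & 0x38) == 0x38 reads "entry 0 is older than
 * entries 1, 2 and 3", making entry 0 the victim; the masked assignment that
 * follows re-marks the victim as the newest member of every pair it belongs
 * to, the same update applied on an ITLB hit below.
 */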
/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return the entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry number, or an error code.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return the entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry number, or an error code.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap the 0x1C000000-0x1FFFFFFF region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
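/* End-to-end sketch (editor's addition) of a successful read translation,
 * assuming a valid 4K UTLB entry { vpn=0x8C012000, ppn=0x0C014000,
 * mask=MASK_4K } matched by addr = 0x8C012345:
 *
 *     pma = (0x0C014000 & 0xFFFFF000) | (0x8C012345 & ~0xFFFFF000)
 *         = 0x0C014000 | 0x345
 *         = 0x0C014345
 *
 * 0x0C014345 < 0x1C000000, so the on-chip remap at the end does not apply.
 */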
sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap the 0x1C000000-0x1FFFFFFF region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally (hence it
 * should only be used immediately prior to executing code), and will
 * otherwise set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1 and P2 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
/**
 * Translate an address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions and ignores protection bits, though
 * the underlying TLB lookups may still update replacement state. Returns the
 * translated address, or MMU_VMA_ERROR on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1 and P2 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}
gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                    (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
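/* Address-formation sketch for the untranslated (AT=0) branch above, per the
 * SH4 store-queue rule that physical address bits 28..26 come from QACR and
 * the rest from the SQ effective address. E.g. a prefetch on
 * addr = 0xE0000100 (SQ0, since bit 5 is clear) with QACR0 = 0x10:
 *
 *     hi     = (0x10 & 0x1C) << 24            = 0x10000000
 *     target = (0xE0000100 & 0x03FFFFE0) | hi = 0x10000100
 *
 * and the 32-byte SQ0 block is copied to 0x10000100.
 */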