filename src/sh4/mmu.c
changeset 826:69f2c9f1e608
prev 819:ef4fec10a63a
next 841:808d64b05073
author nkeynes
date Mon Aug 25 11:29:24 2008 +0000
permissions -rw-r--r--
last change Start unstubifying the UBC module
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)
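
/* On a TLB-related fault the SH4 latches the faulting address into TEA and
 * loads the VPN field of PTEH, so the exception handler can service the miss
 * with an LDTLB; the macros above replicate that behaviour (register usage
 * per the SH7750 programming manual).
 */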

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
} while(0)

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
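
/* Example: a 64K entry with vpn=0x10010000 has mask=MASK_64K, so any address
 * 0x1001xxxx matches it: ((0x10013F00 ^ 0x10010000) & 0xFFFF0000) == 0. The
 * bits below the mask pass through from the virtual address when the physical
 * address is formed. (Values here are illustrative only.)
 */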

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
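
/* MMUCR.URC/URB/LRUI are kept in separate shadow variables (mmu_urc etc.)
 * rather than in the backing MMIO word, so reads recompose the register from
 * the shadows and writes split them back out.
 */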
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
    case PMCR2:
        if( val != 0 ) {
            WARN( "Performance counters not implemented" );
        }
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}
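
/* The operand cache can act as 8K of on-chip RAM (OC-RAM) in two 4K banks,
 * which is what the `cache` buffer above backs. CCR.OIX selects the address
 * bit that picks the bank: bit 13 when OIX=0, bit 25 when OIX=1 (bank
 * selection per the SH7750 manual). The page_map arithmetic below assumes
 * page_map is indexed by page number with LXDREAM_PAGE_BITS == 12.
 */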
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 - bank selected by address bit 13 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 - bank selected by address bit 25 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + (((i>>(25-LXDREAM_PAGE_BITS))&0x01)<<LXDREAM_PAGE_BITS);
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
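
/* Register layout consumed above (SH7750): PTEH = VPN[31:10] | ASID[7:0],
 * PTEL = PPN[28:10] | flags[8:0]. E.g. writing PTEH=0x10010042 and
 * PTEL=0x0C00017C, then issuing LDTLB, loads a valid, user-accessible,
 * writable, cacheable, dirty 4K mapping of vpn 0x10010000 -> ppn 0x0C000000
 * for ASID 0x42 into the entry selected by MMUCR.URC. (Example values are
 * illustrative only.)
 */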

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
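
/* Decoding for the memory-mapped UTLB arrays (address array at 0xF6000000,
 * data array at 0xF7000000 on the SH7750): bits 13:8 select the entry, bit 7
 * requests an associative write, and bit 23 selects the DATA2 (PTEA) word.
 */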

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same matching rules as
 * the normal lookup, but takes the ASID explicitly and leaves MMUCR.URC
 * untouched.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same matching rules as
 * the normal lookup, but takes the ASID explicitly.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual UTLB lookup with ASID matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a TLB data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the index of the matching UTLB entry, or an error code.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;
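
    /* URC is the hardware's round-robin replacement pointer: it advances on
     * every UTLB access and wraps at URB (when URB is non-zero) or at the
     * 64-entry limit.
     */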
    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual UTLB lookup, matching on VPN only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a TLB data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the index of the matching UTLB entry, or an error code.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}
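
/* ITLB replacement is driven by MMUCR.LRUI, a 6-bit matrix that records the
 * relative age of each pair of the four ITLB entries. The bit patterns
 * tested and set below are the LRU conditions given in the SH7750 hardware
 * manual; e.g. (LRUI & 0x38) == 0x38 means entry 0 is least recently used.
 */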

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual ITLB lookup with ASID matching.
 * Possible outcomes are:
 *   0..3  Single match - good, return entry found
 *   -1 No match - raise a TLB miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the index of the matching ITLB entry, or an error code.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual ITLB lookup, matching on VPN only.
 * Possible outcomes are:
 *   0..3  Single match - good, return entry found
 *   -1 No match - raise a TLB miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the index of the matching ITLB entry, or an error code.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
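
/* SH4 address-space recap for the translation functions below:
 *   P0/U0 0x00000000-0x7FFFFFFF - translated when MMUCR.AT=1
 *   P1    0x80000000-0x9FFFFFFF - privileged pass-through, cached
 *   P2    0xA0000000-0xBFFFFFFF - privileged pass-through, uncached
 *   P3    0xC0000000-0xDFFFFFFF - privileged, translated
 *   P4    0xE0000000-0xFFFFFFFF - control space; the store queues at
 *         0xE0000000-0xE3FFFFFF are user-accessible when MMUCR.SQMD=0.
 */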

sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
                !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}

sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                    ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
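
/* sh4_icache caches a host pointer and vma mask for the page currently being
 * executed, so the core can fetch instructions without a TLB lookup on every
 * access; page_vma == -1 marks the cached page as invalid.
 */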

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method raises TLB exceptions normally (hence it should
 * only be used immediately prior to executing code), and sets the icache
 * according to the matching TLB entry.
 * If AT is off, this method sets the icache to cover the entire referenced
 * RAM/ROM region.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate an address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR on
 * translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
                (vma & (~mmu_itlb[entryNo].mask));
    }
}
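
/* Store queue flush: a PREF to 0xE0000000-0xE3FFFFFF bursts the corresponding
 * 32-byte queue (SQ0 or SQ1, selected by address bit 5) out to memory. With
 * the MMU off, the target is QACR0/1 bits 28:26 plus address bits 25:5; with
 * the MMU on, the address goes through a normal UTLB write translation.
 */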

gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                    (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}

/********************************* PMM *************************************/

/**
 * Side note - this is here (rather than in sh4mmio.c) as the control registers
 * are part of the MMU block, and it seems simplest to keep it all together.
 */
int32_t mmio_region_PMM_read( uint32_t reg )
{
    return MMIO_READ( PMM, reg );
}

void mmio_region_PMM_write( uint32_t reg, uint32_t val )
{
    /* Read-only */
}