lxdream.org :: lxdream/src/sh4/mmu.c

filename    src/sh4/mmu.c
changeset   569:a1c49e1e8776
prev        561:533f6b478071
next        570:d2893980fbf5
author      nkeynes
date        Fri Jan 04 11:54:17 2008 +0000
branch      lxdream-mmu
permissions -rw-r--r--
last change Bring icache partially into line with the mmu, a little less slow with AT off now.

/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "mem.h"

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

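/* The SH7750 MMU consists of a 4-entry instruction TLB backed by a 64-entry
 * unified TLB; on an ITLB miss the entry is refilled from the UTLB (see
 * mmu_itlb_update_from_utlb below), so LDTLB only ever loads the UTLB.
 */
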
/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

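/* The TLB_* flag values correspond to the low nine bits of PTEL on the
 * SH7750: V (0x100), SZ1/SZ0 (0x90), PR1 (0x40), PR0 (0x20), C (0x08),
 * D (0x04), SH (0x02) and WT (0x01). PR1 set means the page is accessible
 * from user mode and PR0 set means it is writable, hence TLB_USERMODE and
 * TLB_WRITABLE above.
 */
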
struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

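/* URC, URB and LRUI are fields of MMUCR (bits 10-15, 18-23 and 26-31
 * respectively), but they change on nearly every TLB operation, so they are
 * shadowed in the variables above and only merged back into the register
 * value when MMUCR is actually read (see mmio_region_MMU_read below).
 */
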
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    }
    return MASK_1K; /* unreachable - the four cases above are exhaustive */
}

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR ) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 - address bit 13 selects the page */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 - address bit 25 selects the page */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + (((i>>(25-PAGE_BITS))&0x01)<<PAGE_BITS);
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}

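/* In OC-RAM mode (CCR.ORA=1) half of the operand cache acts as 8KB of
 * scratch RAM, backed here by the two host pages allocated in MMU_init().
 * CCR.OIX selects which address bit picks the 4KB page within that RAM:
 * bit 13 when OIX=0, bit 25 when OIX=1. Since i above is a page number
 * rather than a byte address, those correspond to page-number bits 1 and
 * 25-PAGE_BITS respectively.
 */
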
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}

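/* Worked example: with PTEH = 0x12345012 and PTEL = 0x005F81FF, LDTLB loads
 * the entry at index URC with vpn = 0x12345000, asid = 0x12,
 * ppn = 0x005F8000 and flags = 0x1FF; the size bits (0x90) select a 1MB
 * page, so get_mask_for_flags() yields 0xFFF00000.
 */
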
static inline void mmu_flush_pages( struct utlb_entry *ent )
{
    unsigned int vpn;
    switch( ent->flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
    case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
    case TLB_SIZE_64K:
        for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
            xlat_flush_page( vpn );
        }
        break;
    case TLB_SIZE_1M:
        for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
            xlat_flush_page( vpn );
        }
        break;
    }
}

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. It can undoubtedly be better optimized too.
 */

/**
 * Perform the actual utlb lookup with asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

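/* For example, a valid 4K entry with vpn = 0x12345000 (mask 0xFFFFF000)
 * matches any address in 0x12345000-0x12345FFF, provided the entry is
 * shared or its asid equals the current mmu_asid. If a second entry also
 * matched, the lookup would return -2 (multi-hit) instead.
 */
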
/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

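/* The bit tests above follow the LRUI encoding of the SH7750 manual: each
 * of the six LRUI bits records the relative age of one pair of ITLB
 * entries, so (lrui & 0x38) == 0x38 means entry 0 is older than entries 1,
 * 2 and 3 and is therefore the replacement victim, and so on.
 */
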
/**
 * Perform the actual itlb lookup with asid protection.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry; /* propagate miss (-1) or multi-hit (-2) */
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry; /* propagate miss (-1) or multi-hit (-2) */
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

#define RAISE_TLB_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
} while(0)

#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code)

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or by alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) do { \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
} while(0)

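/* The translation functions below return 64 bits so that failure can be
 * signalled out-of-band: on success the low 32 bits hold the physical
 * address, while 0x100000000LL means the access faulted and an exception
 * has already been raised.
 */
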
uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                return (uint64_t)addr;
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return (uint64_t)addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return 0x100000000LL;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return (uint64_t)addr;
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return 0x100000000LL;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return 0x100000000LL;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return 0x100000000LL;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return 0x100000000LL;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
    return -1; /* unreachable */
}

uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                return (uint64_t)addr;
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return (uint64_t)addr;
            }
            MMU_READ_ADDR_ERROR();
            return 0x100000000LL;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return (uint64_t)addr;
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return 0x100000000LL;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return 0x100000000LL;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return 0x100000000LL;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
    return -1; /* unreachable */
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

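/* P4 access to the ITLB arrays (address array at 0xF2000000, data array at
 * 0xF3000000): the entry number is taken from bits 8-7 of the P4 address.
 */
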
int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

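/* P4 access to the UTLB arrays (address array at 0xF6000000, data arrays at
 * 0xF7000000): the entry number is taken from bits 13-8 of the P4 address,
 * bit 7 requests an associative (lookup-based) write, and bit 23 selects the
 * second data array, which holds the PCMCIA assistance bits.
 */
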
int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
        int entryNo = mmu_utlb_lookup_assoc( val, asid );
        if( entryNo >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[entryNo];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7); // D bit of the write value
        } else if( entryNo == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7); // D bit of the write value
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

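/* The physical memory map below is the Dreamcast's: 16MB of main RAM
 * mirrored at 0x0C000000, and the 2MB boot ROM at physical address 0.
 */
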
/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * is invalidated instead.
 * If AT is on, this method raises TLB exceptions normally (hence it should
 * only be used immediately prior to executing code), and on success sets the
 * icache according to the matching TLB entry. If AT is off, it sets the
 * icache to cover the entire referenced RAM/ROM region.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1 and P2 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        } else {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( mmucr & MMUCR_SV ) {
            entryNo = mmu_itlb_lookup_vpn( addr );
        } else {
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        }
        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}