filename    src/sh4/mmu.c
changeset   561:533f6b478071
prev        559:06714bc64271
next        569:a1c49e1e8776
author      nkeynes
date        Tue Jan 01 08:57:33 2008 +0000
branch      lxdream-mmu
permissions -rw-r--r--
last change Add breakpoint_type_t enum (general cleanup)
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "mem.h"

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    }
    return MASK_1K; /* unreachable - all four size encodings are handled above */
}

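/* Note: the page-size field is split across two non-adjacent flag bits
 * (bit 7 and bit 4, together TLB_SIZE_MASK), so the mask can't be derived
 * with a simple shift - hence the switch above.
 */
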
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR ) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

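/* MMUCR layout assumed by the handlers here, per the SH7750 manual:
 *   bits 31:26 LRUI, 23:18 URB, 15:10 URC, 9 SQMD, 8 SV, 2 TI, 0 AT.
 * URC, URB and LRUI are modelled in separate variables and merged back
 * into the register value on read; only SQMD, SV and AT are kept in the
 * MMIO copy itself (the 0x301 mask in the write handler below).
 */
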
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                /* i is a page index, so shift up before testing address bit 25 */
                page_map[i] = cache + (((i<<PAGE_BITS)&0x02000000)>>(25-PAGE_BITS));
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}

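/* For reference: in OC-RAM mode the 8K of cache RAM behaves as two 4K
 * banks, selected by address bit 13 when OIX=0 and by address bit 25 when
 * OIX=1 (assuming PAGE_BITS == 12, which the bit arithmetic above relies on).
 */
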
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}

static inline void mmu_flush_pages( struct utlb_entry *ent )
{
    unsigned int vpn;
    switch( ent->flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
    case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
    case TLB_SIZE_64K:
        for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
            xlat_flush_page( vpn );
        }
        break;
    case TLB_SIZE_1M:
        for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
            xlat_flush_page( vpn );
        }
        break;
    }
}

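/* The 4K stride above suggests the translation cache tracks pages at 4K
 * granularity, so the larger TLB page sizes are flushed one 4K page at a time.
 */
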
/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. It can undoubtedly be better optimized too.
 */

/**
 * Perform the actual utlb lookup.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @param asid Address space identifier
 * @param use_asid whether to require an asid match on non-shared pages.
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

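    /* The update above models URC as the hardware's round-robin replacement
     * pointer: it advances on every UTLB lookup and wraps when it reaches
     * URB (or the last entry), so LDTLB writes cycle through the UTLB.
     */
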
    if( use_asid ) {
        for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
                if( result != -1 ) {
                    return -2;
                }
                result = i;
            }
        }
    } else {
        for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
                if( result != -1 ) {
                    return -2;
                }
                result = i;
            }
        }
    }
    return result;
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual itlb lookup.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @param asid Address space identifier
 * @param use_asid whether to require an asid match on non-shared pages.
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
{
    int result = -1;
    unsigned int i;
    if( use_asid ) {
        for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
            if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
                if( result != -1 ) {
                    return -2;
                }
                result = i;
            }
        }
    } else {
        for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
            if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
                if( result != -1 ) {
                    return -2;
                }
                result = i;
            }
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

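/* The mmu_lrui updates above (and in mmu_itlb_update_from_utlb below) model
 * the ITLB's 6-bit pseudo-LRU field: each bit records the relative recency
 * of one pair of the four entries, so marking entry N most-recently-used
 * touches exactly the three bits involving N. The masks appear to follow
 * the encoding given in the SH7750 manual.
 */
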
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code); \
    return (((uint64_t)code)<<32)

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code); \
    return (((uint64_t)code)<<32)

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code); \
    return (((uint64_t)EXV_EXCEPTION)<<32)

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    return (((uint64_t)EXC_TLB_MULTI_HIT)<<32)

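/* All of the mmu_vma_to_phys_* functions below return a packed 64-bit
 * result: on success the high word is zero and the low word is the physical
 * address; on failure the exception has already been raised and the high
 * word is nonzero. A hypothetical caller would check it roughly like this
 * (a sketch only - not part of this file's API):
 *
 *     uint64_t result = mmu_vma_to_phys_read( addr );
 *     if( result >> 32 ) {
 *         return; // exception raised - abandon the access
 *     }
 *     sh4addr_t phys = (sh4addr_t)result;
 */
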
uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                return (uint64_t)addr;
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return (uint64_t)addr;
            }
            MMU_WRITE_ADDR_ERROR();
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return (uint64_t)addr;
    }

    /* If we get this far, translation is required */

    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;

    int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        break;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        break;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
    return -1;
}

uint64_t mmu_vma_to_phys_exec( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr < 0xC0000000 ) {
                /* P1 and P2 regions are pass-through (no translation) */
                return (uint64_t)addr;
            } else if( addr >= 0xE0000000 ) {
                MMU_READ_ADDR_ERROR();
            }
        } else {
            MMU_READ_ADDR_ERROR();
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return (uint64_t)addr;
    }

    /* If we get this far, translation is required */
    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;

    int entryNo = mmu_itlb_lookup_vpn( addr, asid, use_asid );
    if( entryNo == -1 ) {
        /* ITLB miss - refill from the UTLB as the hardware would */
        entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
        if( entryNo >= 0 ) {
            entryNo = mmu_itlb_update_from_utlb( entryNo );
        }
    }
    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        break;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        break;
    default:
        if( (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
        }

        /* finally generate the target address */
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (addr & (~mmu_itlb[entryNo].mask));
    }
    return -1;
}

uint64_t mmu_vma_to_phys_read_noexc( sh4addr_t addr ) {
    /* TODO: empty in this changeset - not yet implemented */
}

uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                return (uint64_t)addr;
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return (uint64_t)addr;
            }
            MMU_READ_ADDR_ERROR();
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return (uint64_t)addr;
    }

    /* If we get this far, translation is required */

    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;

    int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        break;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        break;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
    return -1;
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

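/* ITLB address/data array accesses select one of the four entries via
 * address bits [8:7], following the SH7750 memory-mapped TLB layout.
 */
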
int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

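/* UTLB array accesses: the entry is selected by address bits [13:8], the
 * association bit is address bit 7, and data array 2 (the PCMCIA word) is
 * selected by address bit 23. In the value written, bit 8 is V and bit 9
 * is D - hence the ((val & 0x200)>>7) shift into TLB_DIRTY below.
 */
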
int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
        int entryNo = mmu_utlb_lookup_assoc( val, asid );
        if( entryNo >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[entryNo];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        } else if( entryNo == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}