lxdream :: src/sh4/mmu.c
changeset 577:a181aeacd6e8 (prev 571:9bc09948d0f2, next 583:ba995fadf173)
author nkeynes, Mon Jan 14 10:23:49 2008 +0000, branch lxdream-mmu
last change: Remove asm file and convert to inline (easier to cope with platform
conventions); add breakpoint support; add MMU store-queue support
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
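
/* Illustrative sketch (not part of the original file): how a caller is
 * expected to react once one of the macros above has raised an exception.
 * The translation functions later in this file return MMU_VMA_ERROR in that
 * case, so the caller simply abandons the access.  sh4_example_read_long()
 * and ext_read_long() are hypothetical names used only for this example.
 */
#if 0
static int32_t sh4_example_read_long( sh4vma_t vma )
{
    sh4addr_t addr = mmu_vma_to_phys_read( vma );
    if( addr == MMU_VMA_ERROR ) {
        return 0; /* exception already raised - caller just unwinds */
    }
    return ext_read_long( addr ); /* hypothetical physical-address read */
}
#endif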

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();


static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    }
}
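
/* Worked example (added for clarity): a 4K entry has TLB_SIZE_4K (0x10) set
 * in its flags, so get_mask_for_flags() returns MASK_4K (0xFFFFF000) and the
 * low 12 bits of an address fall through as the page offset.  Note the switch
 * covers every possible value of (flags & TLB_SIZE_MASK), so the missing
 * default is unreachable.
 */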

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
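
/* Worked example (added for clarity): the URC, URB and LRUI counters live in
 * the mmu_* variables rather than in the stored register value, so a read of
 * MMUCR splices them back in at bits 10-15, 18-23 and 26-31.  E.g. with
 * mmu_urc = 5 and a stored value of 0x00000001, the guest reads
 * 0x00000001 | (5<<10) = 0x00001401 (assuming urb = lrui = 0).
 */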

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_translate_flush_cache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}


void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02000000)>>(25-PAGE_BITS));
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
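
/* Illustrative sketch (added, not part of the original file): the register
 * values LDTLB consumes are whatever the guest last wrote to PTEH/PTEL/PTEA,
 * so an emulated "load a 4K page mapping" sequence reduces to three MMIO
 * writes followed by MMU_ldtlb().  The constants below are made up for the
 * example.
 */
#if 0
static void example_load_tlb_entry(void)
{
    /* VPN 0x10400000 with ASID 0x42 */
    mmio_region_MMU_write( PTEH, 0x10400000 | 0x42 );
    /* PPN 0x0C004000, 4K page, valid, writable, dirty, cacheable */
    mmio_region_MMU_write( PTEL, 0x0C004000 | TLB_SIZE_4K | TLB_VALID |
                           TLB_WRITABLE | TLB_DIRTY | TLB_CACHEABLE );
    mmio_region_MMU_write( PTEA, 0 );
    MMU_ldtlb(); /* copies the three registers into mmu_utlb[mmu_urc] */
}
#endif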

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
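
/* Worked example (added for clarity): for a P4 UTLB address-array access at
 * 0xF6001F80, UTLB_ENTRY() selects entry (0xF6001F80>>8)&0x3F = 0x1F, and
 * UTLB_ASSOC() is non-zero because bit 7 is set, so the address-array write
 * below takes the associative path.  Bit 23 (UTLB_DATA2) distinguishes the
 * PTEA-style data array from the PTEL-style one.
 */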

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
        int utlb = mmu_utlb_lookup_assoc( val, asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}
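
/* Worked example (added for clarity): a store of 0x10400000 (valid bit clear)
 * to the address-array location 0xF6000080 takes the associative path above -
 * any UTLB entry matching VPN 0x10400000 under the current ASID has its V and
 * D bits cleared, which is the usual way for guest code to invalidate a
 * single page.  The numbers are invented for the example.
 */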

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translation code is excessively complicated, but unfortunately so is
 * the system it models. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }
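
    /* Added note (illustrative): the conditions above mirror the SH4 LRUI
     * replacement rules - an LRUI of the form 111xxx selects entry 0, 0xx11x
     * entry 1, x0x0x1 entry 2, and anything else entry 3 - which is where the
     * masks 0x38, 0x26/0x06 and 0x15/0x01 come from.
     */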

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry == -1 ) {
            return -1;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry == -1 ) {
            return -1;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}
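
/* Worked example (added for clarity): with a valid 4K UTLB entry holding
 * vpn = 0x10400000, mask = MASK_4K and ppn = 0x0C004000, a read from
 * 0x10400A5C translates to (0x0C004000 & 0xFFFFF000) | (0x10400A5C & 0xFFF)
 * = 0x0C004A5C; the page offset passes through untouched.  The numbers are
 * invented for the example.
 */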

sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
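
/* Worked example (added for clarity): a P1 address such as 0x8C010000 has
 * (addr & 0x1C000000) == 0x0C000000, so it is treated as main RAM and the
 * icache window becomes page_vma = 0x8C000000, page_ppa = 0x0C000000 with a
 * 16MB mask - any fetch within that window then avoids another lookup.
 */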

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( mmucr & MMUCR_SV ) {
            entryNo = mmu_itlb_lookup_vpn( addr );
        } else {
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        }
        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                      (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
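
/* Worked example (added for clarity): with address translation off, a flush
 * of 0xE0000020 selects store queue 1 (bit 5 set, so queue index 8 into the
 * sh4r.store_queue array) and, with QACR1 = 0x10, the external target becomes
 * (0xE0000020 & 0x03FFFFE0) | ((0x10 & 0x1C) << 24) = 0x10000020.  The QACR1
 * value is invented for the example.
 */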