lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    1217:677b1d85f1b4
prev         1202:01ae5cbad4c8
next         1295:9067aff5522d
author       nkeynes
date         Fri Aug 24 08:53:50 2012 +1000
permissions  -rw-r--r--
last change Move the generated prologue/epilogue code out into a common entry stub
(reduces space requirements) and pre-save all saved registers. Change
FASTCALL to use 3 regs instead of 2 since we can now keep everything in
regs.
/**
 * $Id$
 *
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#include "mem.h"
#include "mmu.h"

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )

/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;
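
/* Illustrative sketch (not part of the original source): each map is an
 * array of mem_region_fn_t vtable pointers indexed by the top 20 bits of the
 * address, so every access decodes with one table lookup plus an indirect
 * call. Assuming the read_long member declared in mem.h:
 *
 *     mem_region_fn_t fn = sh4_address_space[addr >> 12];
 *     uint32_t val = fn->read_long( addr );
 */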

/* External address space (usually the same as the global ext_address_space) */
static mem_region_fn_t *sh4_ext_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;
static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */

/* Module globals */
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid
static struct utlb_default_regions *mmu_user_storequeue_regions;

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;


/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected, int tlb_on );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
static int mmu_read_urc();

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );

#define DEFAULT_REGIONS 0
#define DEFAULT_STOREQUEUE_REGIONS 1
#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2

static struct utlb_default_regions mmu_default_regions[3] = {
        { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
        { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
        { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };

#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])

#ifndef SH4_TRANSLATOR
/* Dummy MMU vtable functions */
void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
}
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
}
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
}
#endif

/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */

void MMU_init()
{
    sh4_ext_address_space = ext_address_space;
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
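    /* Sizing note (editorial): each map needs one pointer per 4K page of the
     * 4GB address space, i.e. 2^20 entries. Assuming mem_alloc_pages() takes
     * a count of 4K pages, sizeof(mem_region_fn_t) * 256 pages comes to
     * 2^20 * sizeof(mem_region_fn_t) bytes regardless of pointer size.
     */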
    mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    mmu_utlb_1k_init();

    /* Ensure the code regions are executable, although it might be more
     * portable to mmap these at runtime rather than using static declarations.
     */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    mmu_read_urc();
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) { /* was &mmu_urc: copy-paste bug */
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_urc_overflow = mmu_urc >= mmu_urb;
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
    return 0;
}

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    int urc = mmu_read_urc();
    if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( urc );
    mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
    if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( urc );
}
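
/* Usage sketch (illustration only, register setup hypothetical): guest code
 * loads a TLB entry by filling PTEH/PTEL and issuing LDTLB, which the
 * emulator services with MMU_ldtlb(). In SH4 assembly, roughly:
 *
 *     mov.l   r1, @r0    ! r0 = 0xFF000000 (PTEH): VPN | ASID
 *     mov.l   r3, @r2    ! r2 = 0xFF000004 (PTEL): PPN | protection flags
 *     ldtlb              ! copy PTEH/PTEL into UTLB[MMUCR.URC]
 */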


MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
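
/* Note: only AT, SV and SQMD survive in the backing store (the write handler
 * below masks with 0x00000301); the read above recomposes the live counter
 * fields in their architectural positions - URC at bits 15:10, URB at bits
 * 23:18 and LRUI at bits 31:26, per the SH7750 MMUCR layout.
 */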

MMIO_REGION_READ_DEFSUBFNS(MMU)

MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        if( mmu_urb == 0 ) {
            mmu_urb = 0x40;
        } else if( mmu_urc >= mmu_urb ) {
            mmu_urc_overflow = TRUE;
        }
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
            xlat_flush_cache(); // If we're not running, flush the cache anyway
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
static void mmu_utlb_1k_init()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    }
    mmu_utlb_1k_free_index = 0;
}

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
{
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
    return entry;
}

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
{
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
}
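
/* The free list above behaves as a stack of indices: alloc pops the index at
 * mmu_utlb_1k_free_index, free pushes it back. UTLB_ENTRY_COUNT intermediate
 * entries suffice because each 1K mapping originates from one UTLB slot and
 * touches exactly one 4K frame, so at most one intermediate entry per UTLB
 * entry can be live at once.
 */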


/********************** Address space maintenance *************************/

mem_region_fn_t *mmu_set_ext_address_space( mem_region_fn_t *ext )
{
    mem_region_fn_t *old_ext = sh4_ext_address_space;
    sh4_ext_address_space = ext;
    mmu_set_tlb_enabled(IS_TLB_ENABLED());
    return old_ext;
}

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static int mmu_read_urc()
{
    if( mmu_urc_overflow ) {
        if( mmu_urc >= 0x40 ) {
            mmu_urc_overflow = FALSE;
            mmu_urc -= 0x40;
            mmu_urc %= mmu_urb;
        }
    } else {
        mmu_urc %= mmu_urb;
    }
    return mmu_urc;
}
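
/* Worked example (editorial, assuming the hardware behaviour the fixup
 * models): with urb = 0x10, writing URC = 0x3C sets mmu_urc_overflow, since
 * real hardware would count 0x3C..0x3F, wrap to 0 at the 6-bit boundary, and
 * only then count modulo urb. If accessors have since incremented urc to
 * 0x44, the fixup yields (0x44 - 0x40) % 0x10 = 4 - exactly where the
 * hardware counter would be after the same 8 increments.
 */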

static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}
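
/* Note: regions are end-exclusive, and the unsigned subtraction lets end wrap
 * past zero - e.g. mmu_register_user_mem_region( 0x80000000, 0x00000000, ... )
 * in MMU_init() computes (0x00000000 - 0x80000000) >> 12 = 0x80000 pages,
 * covering the entire upper half of the address space.
 */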

static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
{
    unsigned int i;
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        /* TLB on */
        sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
    } else {
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        }
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
        }
    }
    return TRUE;
}

static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr, *uptr;
    int i;

    /* Reset the storequeue area */

    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );

        /* Default SQ prefetch goes to TLB miss (?) */
        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        mmu_utlb_register_all();
    } else {
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }

        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        if( IS_STOREQUEUE_PROTECTED() ) {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
        } else {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        }
    }

}
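
/* The memcpy loops above stamp the external map into each 512MB segment:
 * seven copies cover 0x00000000-0xDFFFFFFF (U0/P0 plus the P1-P3 mirrors) in
 * the privileged map, and four cover U0 (0x00000000-0x7FFFFFFF) in the user
 * map; P4 is left to the explicit registrations. This assumes
 * LXDREAM_PAGE_TABLE_ENTRIES is one 512MB segment's worth of 4K pages
 * (0x20000 entries).
 */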

/**
 * Flip the SQMD switch - this is rather expensive, so will need to be changed if
 * anything expects to do this frequently.
 */
static void mmu_set_storequeue_protected( int protected, int tlb_on )
{
    mem_region_fn_t nontlb_region;
    int i;

    if( protected ) {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
        nontlb_region = &p4_region_storequeue_sqmd;
    } else {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        nontlb_region = &p4_region_storequeue;
    }

    if( tlb_on ) {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
                mmu_utlb_insert_entry(i);
            }
        }
    } else {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
    }

}

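/* ASID switch: every non-shared valid UTLB entry tagged with the old ASID
 * must drop out of the address space maps, and every entry tagged with the
 * new ASID must be mapped in. With SV=1 only the user map changes, since
 * privileged mappings are not ASID-qualified in that mode; otherwise both
 * maps are rewritten.
 */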
static void mmu_set_tlb_asid( uint32_t asid )
{
    if( IS_TLB_ENABLED() ) {
        /* Scan for pages that need to be remapped */
        int i;
        if( IS_SV_ENABLED() ) {
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == mmu_asid &&
                        (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    // Matches old ASID - unmap out
                    if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) ) )
                        mmu_utlb_remap_pages( FALSE, TRUE, i );
                }
            }
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == asid &&
                        (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    // Matches new ASID - map in
                    mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                            mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) );
                }
            }
        } else {
            // Remap both Priv+user pages
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == mmu_asid &&
                        (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) ) )
                        mmu_utlb_remap_pages( TRUE, TRUE, i );
                }
            }
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == asid &&
                        (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                            mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) );
                }
            }
        }
        sh4_icache.page_vma = -1; // invalidate icache as asid has changed
    }
    mmu_asid = asid;
}

static uint32_t get_tlb_size_mask( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
static uint32_t get_tlb_size_pages( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
    }
}
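
/* Size encoding used throughout: get_tlb_size_pages() returns the mapping
 * size in native 4K pages (4K -> 1, 64K -> 16, 1M -> 256), with 0 reserved
 * as the sentinel for the sub-native 1K case - callers such as
 * mmu_utlb_map_pages() test npages == 0 to take the 1K subpage path.
 */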

/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) {
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == privdefs->tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = privdefs->tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = userdefs->tlb_multihit;
            }
        }

    } else {
        if( priv_page != NULL ) {
            /* Privileged mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_miss ) {
                    *ptr++ = priv_page;
                } else {
                    mapping_ok = FALSE;
                    *ptr++ = privdefs->tlb_multihit;
                }
            }
        }
        if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = userdefs->tlb_multihit;
                }
            }
        }
    }

    return mapping_ok;
}

/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
{
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
    int i;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL); /* was remap_priv: gate user page on remap_user */
            uint32_t start_addr;
            int npages;

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
            } else {
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
            }

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
            }

        }
    }
}

/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE;
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == privdefs->tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        if( unmap_priv )
            ent->subpages[idx] = privdefs->tlb_miss;
        if( unmap_user )
            ent->user_subpages[idx] = userdefs->tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else {
        if( unmap_priv ) {
            /* Privileged (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = privdefs->tlb_miss;
            }
        }
        if( unmap_user ) {
            /* User (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *uptr++ = userdefs->tlb_miss;
            }
        }
    }

    return unmapping_ok;
}

static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Store queue mappings are a bit different - normal access is fixed to
         * the store queue register block, and we only map prefetches through
         * the TLB
         */
        mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = mmu_user_storequeue_regions->tlb_prot;
        } else if( IS_STOREQUEUE_PROTECTED() ) {
            upage = &p4_region_storequeue_sqmd;
        } else {
            upage = page;
        }

    }  else {

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = &mem_region_tlb_protected;
        } else {
            upage = page;
        }

        if( (ent->flags & TLB_WRITABLE) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_protected_write;
            page->write_word = (mem_write_fn_t)tlb_protected_write;
            page->write_byte = (mem_write_fn_t)tlb_protected_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else if( (ent->flags & TLB_DIRTY) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_initial_write;
            page->write_word = (mem_write_fn_t)tlb_initial_write;
            page->write_byte = (mem_write_fn_t)tlb_initial_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else {
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
        }
    }

    mmu_utlb_pages[entry].user_fn = upage;

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}

static void mmu_utlb_remove_entry( int entry )
{
    int i, j;
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean unmap_user;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        unmap_user = TRUE;
    } else if( IS_SV_ENABLED() ) {
        unmap_user = FALSE;
    } else {
        return; // Not mapped
    }

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

    if( !clean_unmap ) {
        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
    }
}

static void mmu_utlb_register_all()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
            }
        }
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
{
    mmu_urc++;
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
        fn = ent->subpages[(vpn>>10)&0x03];
        if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
            return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
        }
    }
    if( fn == &mem_region_tlb_multihit ) {
        return -2;
    } else {
        return -1;
    }
}
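
/* Caller sketch (illustration only): because the page maps already encode the
 * lookup result, translation is a reverse pointer-range test rather than a
 * 64-entry scan, e.g.:
 *
 *     int entryNo = mmu_utlb_entry_for_vpn( vpn );
 *     if( entryNo == -2 ) {
 *         // multi-hit: raise the multi-hit exception (reset)
 *     } else if( entryNo == -1 ) {
 *         // miss: raise a TLB miss exception
 *     } else {
 *         uint32_t ppn = mmu_utlb[entryNo].ppn; // entry is valid
 *     }
 */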


/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static int inline mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
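
/* The masked tests above implement the SH7750's 6-bit LRUI scheme: each bit
 * records the relative order of use of one pair of the four ITLB entries, so
 * a fixed mask/value pattern identifies the least-recently-used entry, and
 * the update marks the victim as most recently used (mask/value pairs as
 * given in the hardware manual's LRUI table).
 */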

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
nkeynes@586
  1039
nkeynes@586
  1040
/**
nkeynes@586
  1041
 * Perform the actual ITLB lookup on VPN only (no ASID check).
nkeynes@586
  1042
 * Possible outcomes are:
nkeynes@586
  1043
 *   0..3  Single match - good, return the entry found
nkeynes@586
  1044
 *   -1 No match - raise a TLB miss exception
nkeynes@586
  1045
 *   -2 Multiple matches - raise a multi-hit exception (reset)
nkeynes@586
  1046
 * @param vpn virtual address to resolve
nkeynes@586
  1047
 * @return the resultant ITLB entry, or an error.
nkeynes@586
  1048
 */
nkeynes@586
  1049
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
nkeynes@586
  1050
{
nkeynes@586
  1051
    int result = -1;
nkeynes@586
  1052
    unsigned int i;
nkeynes@586
  1053
nkeynes@586
  1054
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
nkeynes@736
  1055
        if( (mmu_itlb[i].flags & TLB_VALID) &&
nkeynes@736
  1056
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
nkeynes@736
  1057
            if( result != -1 ) {
nkeynes@736
  1058
                return -2;
nkeynes@736
  1059
            }
nkeynes@736
  1060
            result = i;
nkeynes@736
  1061
        }
nkeynes@586
  1062
    }
nkeynes@586
  1063
nkeynes@586
  1064
    if( result == -1 ) {
nkeynes@736
  1065
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
nkeynes@736
  1066
        if( utlbEntry < 0 ) {
nkeynes@736
  1067
            return utlbEntry;
nkeynes@736
  1068
        } else {
nkeynes@736
  1069
            return mmu_itlb_update_from_utlb( utlbEntry );
nkeynes@736
  1070
        }
nkeynes@586
  1071
    }
nkeynes@586
  1072
nkeynes@586
  1073
    switch( result ) {
nkeynes@586
  1074
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
nkeynes@586
  1075
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
nkeynes@586
  1076
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
nkeynes@586
  1077
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
nkeynes@586
  1078
    }
nkeynes@736
  1079
nkeynes@586
  1080
    return result;
nkeynes@586
  1081
}
nkeynes@927
  1082
nkeynes@586
  1083
/**
nkeynes@586
  1084
 * Update the icache for an untranslated address
nkeynes@586
  1085
 */
nkeynes@905
  1086
static inline void mmu_update_icache_phys( sh4addr_t addr )
nkeynes@586
  1087
{
nkeynes@586
  1088
    if( (addr & 0x1C000000) == 0x0C000000 ) {
nkeynes@736
  1089
        /* Main ram */
nkeynes@736
  1090
        sh4_icache.page_vma = addr & 0xFF000000;
nkeynes@736
  1091
        sh4_icache.page_ppa = 0x0C000000;
nkeynes@736
  1092
        sh4_icache.mask = 0xFF000000;
nkeynes@934
  1093
        sh4_icache.page = dc_main_ram;
nkeynes@586
  1094
    } else if( (addr & 0x1FE00000) == 0 ) {
nkeynes@736
  1095
        /* BIOS ROM */
nkeynes@736
  1096
        sh4_icache.page_vma = addr & 0xFFE00000;
nkeynes@736
  1097
        sh4_icache.page_ppa = 0;
nkeynes@736
  1098
        sh4_icache.mask = 0xFFE00000;
nkeynes@934
  1099
        sh4_icache.page = dc_boot_rom;
nkeynes@586
  1100
    } else {
nkeynes@736
  1101
        /* not supported */
nkeynes@736
  1102
        sh4_icache.page_vma = -1;
nkeynes@586
  1103
    }
nkeynes@586
  1104
}
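/* Example decode for the cases above: 0x8C0010A0 satisfies
 * (addr & 0x1C000000) == 0x0C000000, so it is mapped as main RAM:
 *
 *     mmu_update_icache_phys( 0x8C0010A0 );
 *     // sh4_icache.page_vma == 0x8C000000, page_ppa == 0x0C000000,
 *     // mask == 0xFF000000, page == dc_main_ram
 */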
nkeynes@586
  1105
nkeynes@586
  1106
/**
nkeynes@586
  1107
 * Update the sh4_icache structure to describe the page(s) containing the
nkeynes@586
  1108
 * given vma. If the address does not reference a RAM/ROM region, the icache
nkeynes@586
  1109
 * will be invalidated instead.
nkeynes@586
  1110
 * If AT is on, this method will raise TLB exceptions normally
nkeynes@586
  1111
 * (hence this method should only be used immediately prior to execution of
nkeynes@586
  1112
 * code); on a successful lookup it sets the icache according to the matching
 * TLB entry.
nkeynes@586
  1113
 * If AT is off, this method will set the entire referenced RAM/ROM region in
nkeynes@586
  1114
 * the icache.
nkeynes@586
  1115
 * @return TRUE if the update completed (successfully or otherwise), FALSE
nkeynes@586
  1116
 * if an exception was raised.
nkeynes@586
  1117
 */
nkeynes@905
  1118
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
nkeynes@586
  1119
{
nkeynes@586
  1120
    int entryNo;
nkeynes@586
  1121
    if( IS_SH4_PRIVMODE()  ) {
nkeynes@736
  1122
        if( addr & 0x80000000 ) {
nkeynes@736
  1123
            if( addr < 0xC0000000 ) {
nkeynes@736
  1124
                /* P1, P2 and P4 regions are pass-through (no translation) */
nkeynes@736
  1125
                mmu_update_icache_phys(addr);
nkeynes@736
  1126
                return TRUE;
nkeynes@736
  1127
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
nkeynes@939
  1128
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
nkeynes@736
  1129
                return FALSE;
nkeynes@736
  1130
            }
nkeynes@736
  1131
        }
nkeynes@586
  1132
nkeynes@736
  1133
        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
nkeynes@736
  1134
        if( (mmucr & MMUCR_AT) == 0 ) {
nkeynes@736
  1135
            mmu_update_icache_phys(addr);
nkeynes@736
  1136
            return TRUE;
nkeynes@736
  1137
        }
nkeynes@736
  1138
nkeynes@826
  1139
        if( (mmucr & MMUCR_SV) == 0 )
nkeynes@807
  1140
        	entryNo = mmu_itlb_lookup_vpn_asid( addr );
nkeynes@807
  1141
        else
nkeynes@807
  1142
        	entryNo = mmu_itlb_lookup_vpn( addr );
nkeynes@586
  1143
    } else {
nkeynes@736
  1144
        if( addr & 0x80000000 ) {
nkeynes@939
  1145
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
nkeynes@736
  1146
            return FALSE;
nkeynes@736
  1147
        }
nkeynes@586
  1148
nkeynes@736
  1149
        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
nkeynes@736
  1150
        if( (mmucr & MMUCR_AT) == 0 ) {
nkeynes@736
  1151
            mmu_update_icache_phys(addr);
nkeynes@736
  1152
            return TRUE;
nkeynes@736
  1153
        }
nkeynes@736
  1154
nkeynes@807
  1155
        entryNo = mmu_itlb_lookup_vpn_asid( addr );
nkeynes@807
  1156
nkeynes@736
  1157
        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
nkeynes@939
  1158
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
nkeynes@736
  1159
            return FALSE;
nkeynes@736
  1160
        }
nkeynes@586
  1161
    }
nkeynes@586
  1162
nkeynes@586
  1163
    switch(entryNo) {
nkeynes@586
  1164
    case -1:
nkeynes@939
  1165
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
nkeynes@736
  1166
    return FALSE;
nkeynes@586
  1167
    case -2:
nkeynes@939
  1168
    RAISE_TLB_MULTIHIT_ERROR(addr);
nkeynes@736
  1169
    return FALSE;
nkeynes@586
  1170
    default:
nkeynes@736
  1171
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
nkeynes@736
  1172
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
nkeynes@736
  1173
        if( sh4_icache.page == NULL ) {
nkeynes@736
  1174
            sh4_icache.page_vma = -1;
nkeynes@736
  1175
        } else {
nkeynes@736
  1176
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
nkeynes@736
  1177
            sh4_icache.mask = mmu_itlb[entryNo].mask;
nkeynes@736
  1178
        }
nkeynes@736
  1179
        return TRUE;
nkeynes@586
  1180
    }
nkeynes@586
  1181
}
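/* Minimal caller sketch (illustrative only - the real fetch paths live in the
 * SH4 cores): refresh the icache before executing from a new page, then fetch
 * through the cached page pointer.
 *
 *     if( !mmu_update_icache( pc ) )
 *         return;  // exception already raised
 *     uint16_t ir = *(uint16_t *)(sh4_icache.page + (pc - sh4_icache.page_vma));
 */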
nkeynes@586
  1182
nkeynes@597
  1183
/**
nkeynes@826
  1184
 * Translate an address for disassembly purposes (i.e. performs an instruction
nkeynes@597
  1185
 * lookup) - does not raise exceptions or modify any state, and ignores
nkeynes@597
  1186
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
nkeynes@826
  1187
 * on translation failure.
nkeynes@597
  1188
 */
nkeynes@905
  1189
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
nkeynes@597
  1190
{
nkeynes@597
  1191
    if( vma & 0x80000000 ) {
nkeynes@736
  1192
        if( vma < 0xC0000000 ) {
nkeynes@736
  1193
            /* P1, P2 and P4 regions are pass-through (no translation) */
nkeynes@736
  1194
            return VMA_TO_EXT_ADDR(vma);
nkeynes@736
  1195
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
nkeynes@736
  1196
            /* Not translatable */
nkeynes@736
  1197
            return MMU_VMA_ERROR;
nkeynes@736
  1198
        }
nkeynes@597
  1199
    }
nkeynes@597
  1200
nkeynes@597
  1201
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
nkeynes@597
  1202
    if( (mmucr & MMUCR_AT) == 0 ) {
nkeynes@736
  1203
        return VMA_TO_EXT_ADDR(vma);
nkeynes@597
  1204
    }
nkeynes@736
  1205
nkeynes@597
  1206
    int entryNo = mmu_itlb_lookup_vpn( vma );
nkeynes@597
  1207
    if( entryNo == -2 ) {
nkeynes@736
  1208
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
nkeynes@597
  1209
    }
nkeynes@597
  1210
    if( entryNo < 0 ) {
nkeynes@736
  1211
        return MMU_VMA_ERROR;
nkeynes@597
  1212
    } else {
nkeynes@826
  1213
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
nkeynes@826
  1214
        (vma & (~mmu_itlb[entryNo].mask));
nkeynes@597
  1215
    }
nkeynes@597
  1216
}
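/* Example use from a disassembler (illustrative): resolve the fetch address
 * without perturbing any MMU state, and bail out if it is untranslatable.
 *
 *     sh4addr_t phys = mmu_vma_to_phys_disasm( vma );
 *     if( phys == MMU_VMA_ERROR )
 *         return;  // nothing sensible to disassemble
 */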
nkeynes@597
  1217
nkeynes@1217
  1218
/**
nkeynes@1217
  1219
 * Translate a virtual to physical address for reading, raising exceptions as
nkeynes@1217
  1220
 * needed.
nkeynes@1217
  1221
 * @param paddr Pointer to the virtual memory address. On successful return,
nkeynes@1217
  1222
 * will be updated to contain the physical address.
nkeynes@1217
  1223
 */
nkeynes@1217
  1224
mem_region_fn_t FASTCALL mmu_get_region_for_vma_read( sh4vma_t *paddr )
nkeynes@1217
  1225
{
nkeynes@1217
  1226
    sh4vma_t addr = *paddr;
nkeynes@1217
  1227
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
nkeynes@1217
  1228
    if( addr & 0x80000000 ) {
nkeynes@1217
  1229
        if( IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1230
            if( addr >= 0xE0000000 ) {
nkeynes@1217
  1231
                return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
nkeynes@1217
  1232
            } else if( addr < 0xC0000000 ) {
nkeynes@1217
  1233
                /* P1, P2 regions are pass-through (no translation) */
nkeynes@1217
  1234
                return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
nkeynes@1217
  1235
            }
nkeynes@1217
  1236
        } else {
nkeynes@1217
  1237
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
nkeynes@1217
  1238
                    ((mmucr&MMUCR_SQMD) == 0) ) {
nkeynes@1217
  1239
                /* Conditional user-mode access to the store-queue (no translation) */
nkeynes@1217
  1240
                return &p4_region_storequeue;
nkeynes@1217
  1241
            }
nkeynes@1217
  1242
            sh4_raise_exception(EXC_DATA_ADDR_READ);
nkeynes@1217
  1243
            return NULL;
nkeynes@1217
  1244
        }
nkeynes@1217
  1245
    }
nkeynes@1217
  1246
nkeynes@1217
  1247
    if( (mmucr & MMUCR_AT) == 0 ) {
nkeynes@1217
  1248
        return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
nkeynes@1217
  1249
    }
nkeynes@1217
  1250
nkeynes@1217
  1251
    /* If we get this far, translation is required */
nkeynes@1217
  1252
    int entryNo;
nkeynes@1217
  1253
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1254
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
nkeynes@1217
  1255
    } else {
nkeynes@1217
  1256
        entryNo = mmu_utlb_lookup_vpn( addr );
nkeynes@1217
  1257
    }
nkeynes@1217
  1258
nkeynes@1217
  1259
    switch(entryNo) {
nkeynes@1217
  1260
    case -1:
nkeynes@1217
  1261
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ,addr);
nkeynes@1217
  1262
        return NULL;
nkeynes@1217
  1263
    case -2:
nkeynes@1217
  1264
        RAISE_TLB_MULTIHIT_ERROR(addr);
nkeynes@1217
  1265
        return NULL;
nkeynes@1217
  1266
    default:
nkeynes@1217
  1267
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
nkeynes@1217
  1268
                !IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1269
            /* protection violation */
nkeynes@1217
  1270
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ,addr);
nkeynes@1217
  1271
            return NULL;
nkeynes@1217
  1272
        }
nkeynes@1217
  1273
nkeynes@1217
  1274
        /* finally generate the target address */
nkeynes@1217
  1275
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
nkeynes@1217
  1276
                (addr & (~mmu_utlb[entryNo].mask));
nkeynes@1217
  1277
        if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
nkeynes@1217
  1278
            addr = pma | 0xE0000000;
nkeynes@1217
  1279
            *paddr = addr;
nkeynes@1217
  1280
            return sh4_address_space[addr>>12];
nkeynes@1217
  1281
        } else {
nkeynes@1217
  1282
            *paddr = pma;
nkeynes@1217
  1283
            return sh4_ext_address_space[pma>>12];
nkeynes@1217
  1284
        }
nkeynes@1217
  1285
    }
nkeynes@1217
  1286
}
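/* Caller sketch (illustrative, assuming the mem_region_fn layout used by the
 * region tables below, with read_long/write_long as the first two fields): a
 * translated 32-bit read, where NULL means an exception was already raised.
 *
 *     sh4vma_t addr = vma;
 *     mem_region_fn_t fn = mmu_get_region_for_vma_read( &addr );
 *     if( fn != NULL ) {
 *         int32_t val = fn->read_long( addr );  // addr now holds the physical address
 *     }
 */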
nkeynes@1217
  1287
nkeynes@1217
  1288
/**
nkeynes@1217
  1289
 * Translate a virtual to physical address for prefetch, which mostly
nkeynes@1217
  1290
 * does not raise exceptions: a miss or protection violation simply yields
 * the unmapped region.
nkeynes@1217
  1291
 * @param paddr Pointer to the virtual memory address. On successful return,
nkeynes@1217
  1292
 * will be updated to contain the physical address.
nkeynes@1217
  1293
 */
nkeynes@1217
  1294
mem_region_fn_t FASTCALL mmu_get_region_for_vma_prefetch( sh4vma_t *paddr )
nkeynes@1217
  1295
{
nkeynes@1217
  1296
    sh4vma_t addr = *paddr;
nkeynes@1217
  1297
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
nkeynes@1217
  1298
    if( addr & 0x80000000 ) {
nkeynes@1217
  1299
        if( IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1300
            if( addr >= 0xE0000000 ) {
nkeynes@1217
  1301
                return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
nkeynes@1217
  1302
            } else if( addr < 0xC0000000 ) {
nkeynes@1217
  1303
                /* P1, P2 regions are pass-through (no translation) */
nkeynes@1217
  1304
                return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
nkeynes@1217
  1305
            }
nkeynes@1217
  1306
        } else {
nkeynes@1217
  1307
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
nkeynes@1217
  1308
                    ((mmucr&MMUCR_SQMD) == 0) ) {
nkeynes@1217
  1309
                /* Conditional user-mode access to the store-queue (no translation) */
nkeynes@1217
  1310
                return &p4_region_storequeue;
nkeynes@1217
  1311
            }
nkeynes@1217
  1312
            sh4_raise_exception(EXC_DATA_ADDR_READ);
nkeynes@1217
  1313
            return NULL;
nkeynes@1217
  1314
        }
nkeynes@1217
  1315
    }
nkeynes@1217
  1316
nkeynes@1217
  1317
    if( (mmucr & MMUCR_AT) == 0 ) {
nkeynes@1217
  1318
        return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
nkeynes@1217
  1319
    }
nkeynes@1217
  1320
nkeynes@1217
  1321
    /* If we get this far, translation is required */
nkeynes@1217
  1322
    int entryNo;
nkeynes@1217
  1323
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1324
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
nkeynes@1217
  1325
    } else {
nkeynes@1217
  1326
        entryNo = mmu_utlb_lookup_vpn( addr );
nkeynes@1217
  1327
    }
nkeynes@1217
  1328
nkeynes@1217
  1329
    switch(entryNo) {
nkeynes@1217
  1330
    case -1:
nkeynes@1217
  1331
        return &mem_region_unmapped;
nkeynes@1217
  1332
    case -2:
nkeynes@1217
  1333
        RAISE_TLB_MULTIHIT_ERROR(addr);
nkeynes@1217
  1334
        return NULL;
nkeynes@1217
  1335
    default:
nkeynes@1217
  1336
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
nkeynes@1217
  1337
                !IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1338
            /* protection violation */
nkeynes@1217
  1339
            return &mem_region_unmapped;
nkeynes@1217
  1340
        }
nkeynes@1217
  1341
nkeynes@1217
  1342
        /* finally generate the target address */
nkeynes@1217
  1343
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
nkeynes@1217
  1344
                (addr & (~mmu_utlb[entryNo].mask));
nkeynes@1217
  1345
        if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
nkeynes@1217
  1346
            addr = pma | 0xE0000000;
nkeynes@1217
  1347
            *paddr = addr;
nkeynes@1217
  1348
            return sh4_address_space[addr>>12];
nkeynes@1217
  1349
        } else {
nkeynes@1217
  1350
            *paddr = pma;
nkeynes@1217
  1351
            return sh4_ext_address_space[pma>>12];
nkeynes@1217
  1352
        }
nkeynes@1217
  1353
    }
nkeynes@1217
  1354
}
nkeynes@1217
  1355
nkeynes@1217
  1356
/**
nkeynes@1217
  1357
 * Translate a virtual to physical address for writing, raising exceptions as
nkeynes@1217
  1358
 * needed.
nkeynes@1217
  1359
 */
nkeynes@1217
  1360
mem_region_fn_t FASTCALL mmu_get_region_for_vma_write( sh4vma_t *paddr )
nkeynes@1217
  1361
{
nkeynes@1217
  1362
    sh4vma_t addr = *paddr;
nkeynes@1217
  1363
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
nkeynes@1217
  1364
    if( addr & 0x80000000 ) {
nkeynes@1217
  1365
        if( IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1366
            if( addr >= 0xE0000000 ) {
nkeynes@1217
  1367
                return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
nkeynes@1217
  1368
            } else if( addr < 0xC0000000 ) {
nkeynes@1217
  1369
                /* P1, P2 regions are pass-through (no translation) */
nkeynes@1217
  1370
                return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
nkeynes@1217
  1371
            }
nkeynes@1217
  1372
        } else {
nkeynes@1217
  1373
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
nkeynes@1217
  1374
                    ((mmucr&MMUCR_SQMD) == 0) ) {
nkeynes@1217
  1375
                /* Conditional user-mode access to the store-queue (no translation) */
nkeynes@1217
  1376
                return &p4_region_storequeue;
nkeynes@1217
  1377
            }
nkeynes@1217
  1378
            sh4_raise_exception(EXC_DATA_ADDR_WRITE);
nkeynes@1217
  1379
            return NULL;
nkeynes@1217
  1380
        }
nkeynes@1217
  1381
    }
nkeynes@1217
  1382
nkeynes@1217
  1383
    if( (mmucr & MMUCR_AT) == 0 ) {
nkeynes@1217
  1384
        return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
nkeynes@1217
  1385
    }
nkeynes@1217
  1386
nkeynes@1217
  1387
    /* If we get this far, translation is required */
nkeynes@1217
  1388
    int entryNo;
nkeynes@1217
  1389
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
nkeynes@1217
  1390
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
nkeynes@1217
  1391
    } else {
nkeynes@1217
  1392
        entryNo = mmu_utlb_lookup_vpn( addr );
nkeynes@1217
  1393
    }
nkeynes@1217
  1394
nkeynes@1217
  1395
    switch(entryNo) {
nkeynes@1217
  1396
    case -1:
nkeynes@1217
  1397
        RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE,addr);
nkeynes@1217
  1398
        return NULL;
nkeynes@1217
  1399
    case -2:
nkeynes@1217
  1400
        RAISE_TLB_MULTIHIT_ERROR(addr);
nkeynes@1217
  1401
        return NULL;
nkeynes@1217
  1402
    default:
nkeynes@1217
  1403
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
nkeynes@1217
  1404
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
nkeynes@1217
  1405
            /* protection violation */
nkeynes@1217
  1406
            RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE,addr);
nkeynes@1217
  1407
            return NULL;
nkeynes@1217
  1408
        }
nkeynes@1217
  1409
nkeynes@1217
  1410
        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
nkeynes@1217
  1411
            RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
nkeynes@1217
  1412
            return NULL;
nkeynes@1217
  1413
        }
nkeynes@1217
  1414
nkeynes@1217
  1415
        /* finally generate the target address */
nkeynes@1217
  1416
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
nkeynes@1217
  1417
                (addr & (~mmu_utlb[entryNo].mask));
nkeynes@1217
  1418
        if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
nkeynes@1217
  1419
            addr = pma | 0xE0000000;
nkeynes@1217
  1420
            *paddr = addr;
nkeynes@1217
  1421
            return sh4_address_space[addr>>12];
nkeynes@1217
  1422
        } else {
nkeynes@1217
  1423
            *paddr = pma;
nkeynes@1217
  1424
            return sh4_ext_address_space[pma>>12];
nkeynes@1217
  1425
        }
nkeynes@1217
  1426
    }
nkeynes@1217
  1427
}
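/* Note the TLB_DIRTY check above: the first write to a valid but clean page
 * raises the "initial page write" exception rather than a protection fault,
 * giving the guest OS a chance to set the dirty bit in its page tables:
 *
 *     mmu_get_region_for_vma_write( &addr );
 *     // for a valid, writable, clean mapping: raises EXC_INIT_PAGE_WRITE,
 *     // returns NULL
 */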
nkeynes@1217
  1428
nkeynes@1217
  1429
nkeynes@1217
  1430
nkeynes@939
  1431
/********************** TLB Direct-Access Regions ***************************/
nkeynes@939
  1432
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
nkeynes@939
  1433
nkeynes@939
  1434
int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
nkeynes@939
  1435
{
nkeynes@939
  1436
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@939
  1437
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
nkeynes@939
  1438
}
nkeynes@939
  1439
nkeynes@939
  1440
void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
nkeynes@939
  1441
{
nkeynes@939
  1442
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@939
  1443
    ent->vpn = val & 0xFFFFFC00;
nkeynes@939
  1444
    ent->asid = val & 0x000000FF;
nkeynes@939
  1445
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
nkeynes@939
  1446
}
nkeynes@939
  1447
nkeynes@939
  1448
int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
nkeynes@939
  1449
{
nkeynes@939
  1450
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@939
  1451
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
nkeynes@939
  1452
}
nkeynes@939
  1453
nkeynes@939
  1454
void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
nkeynes@939
  1455
{
nkeynes@939
  1456
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@939
  1457
    ent->ppn = val & 0x1FFFFC00;
nkeynes@939
  1458
    ent->flags = val & 0x000001DA;
nkeynes@939
  1459
    ent->mask = get_tlb_size_mask(val);
nkeynes@939
  1460
    if( ent->ppn >= 0x1C000000 )
nkeynes@939
  1461
        ent->ppn |= 0xE0000000;
nkeynes@939
  1462
}
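/* Example decode for the ITLB_ENTRY macro above: the entry number sits in
 * bits 8:7 of the P4 array address (illustrative, assuming the usual
 * 0xF2000000/0xF3000000 ITLB address/data array bases):
 *
 *     ITLB_ENTRY(0xF2000080)   // == 1
 *     ITLB_ENTRY(0xF3000180)   // == 3
 */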
nkeynes@939
  1463
nkeynes@939
  1464
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
nkeynes@939
  1465
#define UTLB_ASSOC(addr) (addr&0x80)
nkeynes@939
  1466
#define UTLB_DATA2(addr) (addr&0x00800000)
nkeynes@939
  1467
nkeynes@939
  1468
int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
nkeynes@939
  1469
{
nkeynes@939
  1470
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
nkeynes@939
  1471
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
nkeynes@939
  1472
    ((ent->flags & TLB_DIRTY)<<7);
nkeynes@939
  1473
}
nkeynes@939
  1474
int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
nkeynes@939
  1475
{
nkeynes@939
  1476
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
nkeynes@939
  1477
    if( UTLB_DATA2(addr) ) {
nkeynes@939
  1478
        return ent->pcmcia;
nkeynes@939
  1479
    } else {
nkeynes@939
  1480
        return (ent->ppn&0x1FFFFC00) | ent->flags;
nkeynes@939
  1481
    }
nkeynes@939
  1482
}
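/* Example decode for the UTLB macros above (illustrative, assuming the usual
 * 0xF6000000/0xF7000000 UTLB array bases): the entry number sits in bits
 * 13:8, the associative bit in bit 7, and the data-array-2 select in bit 23.
 *
 *     UTLB_ENTRY(0xF6003F00)   // == 0x3F (last entry)
 *     UTLB_ASSOC(0xF6000080)   // non-zero: associative write
 *     UTLB_DATA2(0xF7800000)   // non-zero: PCMCIA data (array 2)
 */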
nkeynes@939
  1483
nkeynes@939
  1484
/**
nkeynes@939
  1485
 * Find a UTLB entry for the associative TLB write - same as the normal
nkeynes@939
  1486
 * lookup but ignores the valid bit.
nkeynes@939
  1487
 */
nkeynes@939
  1488
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
nkeynes@939
  1489
{
nkeynes@939
  1490
    int result = -1;
nkeynes@939
  1491
    unsigned int i;
nkeynes@939
  1492
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
nkeynes@939
  1493
        if( (mmu_utlb[i].flags & TLB_VALID) &&
nkeynes@939
  1494
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
nkeynes@939
  1495
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
nkeynes@939
  1496
            if( result != -1 ) {
nkeynes@939
  1497
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
nkeynes@939
  1498
                return -2;
nkeynes@939
  1499
            }
nkeynes@939
  1500
            result = i;
nkeynes@939
  1501
        }
nkeynes@939
  1502
    }
nkeynes@939
  1503
    return result;
nkeynes@939
  1504
}
nkeynes@939
  1505
nkeynes@939
  1506
/**
nkeynes@939
  1507
 * Find a ITLB entry for the associative TLB write - same as the normal
nkeynes@939
  1508
 * lookup but ignores the valid bit.
nkeynes@939
  1509
 */
nkeynes@939
  1510
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
nkeynes@939
  1511
{
nkeynes@939
  1512
    int result = -1;
nkeynes@939
  1513
    unsigned int i;
nkeynes@939
  1514
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
nkeynes@939
  1515
        if( (mmu_itlb[i].flags & TLB_VALID) &&
nkeynes@939
  1516
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
nkeynes@939
  1517
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
nkeynes@939
  1518
            if( result != -1 ) {
nkeynes@939
  1519
                return -2;
nkeynes@939
  1520
            }
nkeynes@939
  1521
            result = i;
nkeynes@939
  1522
        }
nkeynes@939
  1523
    }
nkeynes@939
  1524
    return result;
nkeynes@939
  1525
}
nkeynes@939
  1526
nkeynes@939
  1527
void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
nkeynes@939
  1528
{
nkeynes@939
  1529
    if( UTLB_ASSOC(addr) ) {
nkeynes@939
  1530
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
nkeynes@939
  1531
        if( utlb >= 0 ) {
nkeynes@939
  1532
            struct utlb_entry *ent = &mmu_utlb[utlb];
nkeynes@939
  1533
            uint32_t old_flags = ent->flags;
nkeynes@939
  1534
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
nkeynes@939
  1535
            ent->flags |= (val & TLB_VALID);
nkeynes@939
  1536
            ent->flags |= ((val & 0x200)>>7);
nkeynes@1090
  1537
            if( IS_TLB_ENABLED() && ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
nkeynes@939
  1538
                if( old_flags & TLB_VALID )
nkeynes@939
  1539
                    mmu_utlb_remove_entry( utlb );
nkeynes@939
  1540
                if( ent->flags & TLB_VALID )
nkeynes@939
  1541
                    mmu_utlb_insert_entry( utlb );
nkeynes@939
  1542
            }
nkeynes@939
  1543
        }
nkeynes@939
  1544
nkeynes@939
  1545
        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
nkeynes@939
  1546
        if( itlb >= 0 ) {
nkeynes@939
  1547
            struct itlb_entry *ent = &mmu_itlb[itlb];
nkeynes@939
  1548
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
nkeynes@939
  1549
        }
nkeynes@939
  1550
nkeynes@939
  1551
        if( itlb == -2 || utlb == -2 ) {
nkeynes@1090
  1552
            RAISE_TLB_MULTIHIT_ERROR(addr); /* FIXME: should this only be raised if TLB is enabled? */
nkeynes@1202
  1553
            SH4_EXCEPTION_EXIT();
nkeynes@939
  1554
            return;
nkeynes@939
  1555
        }
nkeynes@939
  1556
    } else {
nkeynes@939
  1557
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
nkeynes@1090
  1558
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
nkeynes@939
  1559
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
nkeynes@939
  1560
        ent->vpn = (val & 0xFFFFFC00);
nkeynes@939
  1561
        ent->asid = (val & 0xFF);
nkeynes@939
  1562
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
nkeynes@939
  1563
        ent->flags |= (val & TLB_VALID);
nkeynes@939
  1564
        ent->flags |= ((val & 0x200)>>7);
nkeynes@1090
  1565
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
nkeynes@939
  1566
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
nkeynes@939
  1567
    }
nkeynes@939
  1568
}
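/* Example of an associative write (illustrative): with UTLB_ASSOC set in the
 * address, the value is matched by VPN/ASID and only the V and D bits are
 * updated; writing with V clear is how a guest OS typically invalidates a
 * single page. Value layout, as decoded above:
 *
 *     //  VPN in bits 31:10, D in bit 9, V in bit 8, ASID in bits 7:0
 *     uint32_t val = 0x7FC00012;  // VPN=0x7FC00000, D=0, V=0, ASID=0x12
 *     mmu_utlb_addr_write( 0xF6000080, val, exc );
 */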
nkeynes@939
  1569
nkeynes@939
  1570
void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
nkeynes@939
  1571
{
nkeynes@939
  1572
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
nkeynes@939
  1573
    if( UTLB_DATA2(addr) ) {
nkeynes@939
  1574
        ent->pcmcia = val & 0x0000000F;
nkeynes@939
  1575
    } else {
nkeynes@1090
  1576
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
nkeynes@939
  1577
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
nkeynes@939
  1578
        ent->ppn = (val & 0x1FFFFC00);
nkeynes@939
  1579
        ent->flags = (val & 0x000001FF);
nkeynes@939
  1580
        ent->mask = get_tlb_size_mask(val);
nkeynes@1090
  1581
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
nkeynes@939
  1582
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
nkeynes@939
  1583
    }
nkeynes@939
  1584
}
nkeynes@939
  1585
nkeynes@939
  1586
struct mem_region_fn p4_region_itlb_addr = {
nkeynes@939
  1587
        mmu_itlb_addr_read, mmu_itlb_addr_write,
nkeynes@939
  1588
        mmu_itlb_addr_read, mmu_itlb_addr_write,
nkeynes@939
  1589
        mmu_itlb_addr_read, mmu_itlb_addr_write,
nkeynes@946
  1590
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1591
        unmapped_prefetch, mmu_itlb_addr_read };
nkeynes@939
  1592
struct mem_region_fn p4_region_itlb_data = {
nkeynes@939
  1593
        mmu_itlb_data_read, mmu_itlb_data_write,
nkeynes@939
  1594
        mmu_itlb_data_read, mmu_itlb_data_write,
nkeynes@939
  1595
        mmu_itlb_data_read, mmu_itlb_data_write,
nkeynes@946
  1596
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1597
        unmapped_prefetch, mmu_itlb_data_read };
nkeynes@939
  1598
struct mem_region_fn p4_region_utlb_addr = {
nkeynes@939
  1599
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
nkeynes@939
  1600
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
nkeynes@939
  1601
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
nkeynes@946
  1602
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1603
        unmapped_prefetch, mmu_utlb_addr_read };
nkeynes@939
  1604
struct mem_region_fn p4_region_utlb_data = {
nkeynes@939
  1605
        mmu_utlb_data_read, mmu_utlb_data_write,
nkeynes@939
  1606
        mmu_utlb_data_read, mmu_utlb_data_write,
nkeynes@939
  1607
        mmu_utlb_data_read, mmu_utlb_data_write,
nkeynes@946
  1608
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1609
        unmapped_prefetch, mmu_utlb_data_read };
nkeynes@939
  1610
nkeynes@939
  1611
/********************** Error regions **************************/
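/* Each stub below backs a mem_region_fn table for pages that cannot be
 * accessed directly: it raises the appropriate exception (bumping URC where
 * the hardware would) and unwinds via SH4_EXCEPTION_EXIT(). For example, a
 * page with no valid UTLB mapping is populated with mem_region_tlb_miss, so
 * a 32-bit load from it lands in tlb_miss_read() below. */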
nkeynes@939
  1612
nkeynes@939
  1613
static void FASTCALL address_error_read( sh4addr_t addr, void *exc ) 
nkeynes@939
  1614
{
nkeynes@939
  1615
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
nkeynes@1202
  1616
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1617
}
nkeynes@939
  1618
nkeynes@975
  1619
static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc ) 
nkeynes@975
  1620
{
nkeynes@975
  1621
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
nkeynes@1202
  1622
    SH4_EXCEPTION_EXIT();
nkeynes@975
  1623
}
nkeynes@975
  1624
nkeynes@939
  1625
static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc ) 
nkeynes@939
  1626
{
nkeynes@939
  1627
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
nkeynes@1202
  1628
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1629
}
nkeynes@939
  1630
nkeynes@939
  1631
static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
nkeynes@939
  1632
{
nkeynes@939
  1633
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
nkeynes@1202
  1634
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1635
}
nkeynes@939
  1636
nkeynes@939
  1637
static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
nkeynes@939
  1638
{
nkeynes@973
  1639
    mmu_urc++;
nkeynes@939
  1640
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
nkeynes@1202
  1641
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1642
}
nkeynes@939
  1643
nkeynes@975
  1644
static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
nkeynes@975
  1645
{
nkeynes@975
  1646
    mmu_urc++;
nkeynes@975
  1647
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
nkeynes@1202
  1648
    SH4_EXCEPTION_EXIT();
nkeynes@975
  1649
}
nkeynes@975
  1650
nkeynes@939
  1651
static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
nkeynes@939
  1652
{
nkeynes@973
  1653
    mmu_urc++;
nkeynes@939
  1654
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
nkeynes@1202
  1655
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1656
}
nkeynes@939
  1657
nkeynes@939
  1658
static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
nkeynes@939
  1659
{
nkeynes@973
  1660
    mmu_urc++;
nkeynes@939
  1661
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
nkeynes@1202
  1662
    SH4_EXCEPTION_EXIT();
nkeynes@975
  1663
}
nkeynes@939
  1664
nkeynes@939
  1665
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
nkeynes@939
  1666
{
nkeynes@973
  1667
    mmu_urc++;
nkeynes@939
  1668
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
nkeynes@1202
  1669
    SH4_EXCEPTION_EXIT();
nkeynes@968
  1670
    return 0; 
nkeynes@953
  1671
}
nkeynes@953
  1672
nkeynes@975
  1673
static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
nkeynes@975
  1674
{
nkeynes@975
  1675
    mmu_urc++;
nkeynes@975
  1676
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
nkeynes@1202
  1677
    SH4_EXCEPTION_EXIT();
nkeynes@975
  1678
    return 0;
nkeynes@939
  1679
}
nkeynes@939
  1680
nkeynes@939
  1681
static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
nkeynes@939
  1682
{
nkeynes@973
  1683
    mmu_urc++;
nkeynes@939
  1684
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
nkeynes@1202
  1685
    SH4_EXCEPTION_EXIT();
nkeynes@968
  1686
    return 0;
nkeynes@939
  1687
}
nkeynes@939
  1688
nkeynes@939
  1689
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
nkeynes@939
  1690
{
nkeynes@973
  1691
    mmu_urc++;
nkeynes@939
  1692
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
nkeynes@1202
  1693
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1694
}
nkeynes@939
  1695
nkeynes@939
  1696
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
nkeynes@939
  1697
{
nkeynes@973
  1698
    mmu_urc++;
nkeynes@939
  1699
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
nkeynes@1202
  1700
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1701
}
nkeynes@975
  1702
nkeynes@975
  1703
static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
nkeynes@975
  1704
{
nkeynes@975
  1705
    mmu_urc++;
nkeynes@975
  1706
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
nkeynes@1202
  1707
    SH4_EXCEPTION_EXIT();
nkeynes@975
  1708
    return 0;
nkeynes@975
  1709
}    
nkeynes@939
  1710
    
nkeynes@939
  1711
static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
nkeynes@939
  1712
{
nkeynes@951
  1713
    sh4_raise_tlb_multihit(addr);
nkeynes@1202
  1714
    SH4_EXCEPTION_EXIT();
nkeynes@968
  1715
    return 0; 
nkeynes@939
  1716
}
nkeynes@939
  1717
nkeynes@939
  1718
static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
nkeynes@939
  1719
{
nkeynes@951
  1720
    sh4_raise_tlb_multihit(addr);
nkeynes@1202
  1721
    SH4_EXCEPTION_EXIT();
nkeynes@968
  1722
    return 0; 
nkeynes@939
  1723
}
nkeynes@939
  1724
static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
nkeynes@939
  1725
{
nkeynes@951
  1726
    sh4_raise_tlb_multihit(addr);
nkeynes@1202
  1727
    SH4_EXCEPTION_EXIT();
nkeynes@939
  1728
}
nkeynes@939
  1729
nkeynes@939
  1730
/**
nkeynes@939
  1731
 * Note: Per sec 4.6.4 of the SH7750 manual, SQ 
nkeynes@939
  1732
 */
nkeynes@939
  1733
struct mem_region_fn mem_region_address_error = {
nkeynes@939
  1734
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@939
  1735
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@939
  1736
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1737
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
nkeynes@975
  1738
        unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };
nkeynes@939
  1739
nkeynes@939
  1740
struct mem_region_fn mem_region_tlb_miss = {
nkeynes@939
  1741
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
nkeynes@939
  1742
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
nkeynes@939
  1743
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
nkeynes@946
  1744
        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
nkeynes@975
  1745
        unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };
nkeynes@939
  1746
nkeynes@946
  1747
struct mem_region_fn mem_region_tlb_protected = {
nkeynes@939
  1748
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
nkeynes@939
  1749
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
nkeynes@939
  1750
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
nkeynes@946
  1751
        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
nkeynes@975
  1752
        unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };
nkeynes@939
  1753
nkeynes@939
  1754
struct mem_region_fn mem_region_tlb_multihit = {
nkeynes@939
  1755
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
nkeynes@939
  1756
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
nkeynes@939
  1757
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
nkeynes@946
  1758
        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
nkeynes@975
  1759
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };
nkeynes@939
  1760
        
nkeynes@946
  1761
nkeynes@946
  1762
/* Store-queue regions */
nkeynes@946
  1763
/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while 
nkeynes@946
  1764
 * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
nkeynes@946
  1765
 * some cases), in contrast to the ordinary fields above.
nkeynes@946
  1766
 * 
nkeynes@946
  1767
 * There is probably a simpler way to do this.
nkeynes@946
  1768
 */
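/* Illustrative mapping, derived from the tables below: with SQMD=0 data
 * accesses always reach the store queue and only the prefetch behaviour
 * varies (p4_region_storequeue_miss raises a TLB miss on prefetch, _multihit
 * a multi-hit, _protected a protection fault); the _sqmd variants, used when
 * SQMD=1 restricts the SQ to privileged mode, additionally turn user-mode
 * data accesses into address errors. */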
nkeynes@946
  1769
nkeynes@946
  1770
struct mem_region_fn p4_region_storequeue = { 
nkeynes@946
  1771
        ccn_storequeue_read_long, ccn_storequeue_write_long,
nkeynes@946
  1772
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
nkeynes@946
  1773
        unmapped_read_long, unmapped_write_long,
nkeynes@946
  1774
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1775
        ccn_storequeue_prefetch, unmapped_read_long }; 
nkeynes@946
  1776
nkeynes@946
  1777
struct mem_region_fn p4_region_storequeue_miss = { 
nkeynes@946
  1778
        ccn_storequeue_read_long, ccn_storequeue_write_long,
nkeynes@946
  1779
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
nkeynes@946
  1780
        unmapped_read_long, unmapped_write_long,
nkeynes@946
  1781
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1782
        (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long }; 
nkeynes@946
  1783
nkeynes@946
  1784
struct mem_region_fn p4_region_storequeue_multihit = { 
nkeynes@946
  1785
        ccn_storequeue_read_long, ccn_storequeue_write_long,
nkeynes@946
  1786
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
nkeynes@946
  1787
        unmapped_read_long, unmapped_write_long,
nkeynes@946
  1788
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1789
        (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long }; 
nkeynes@946
  1790
nkeynes@946
  1791
struct mem_region_fn p4_region_storequeue_protected = {
nkeynes@946
  1792
        ccn_storequeue_read_long, ccn_storequeue_write_long,
nkeynes@946
  1793
        unmapped_read_long, unmapped_write_long,
nkeynes@946
  1794
        unmapped_read_long, unmapped_write_long,
nkeynes@946
  1795
        unmapped_read_burst, unmapped_write_burst,
nkeynes@975
  1796
        (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };
nkeynes@946
  1797
nkeynes@946
  1798
struct mem_region_fn p4_region_storequeue_sqmd = {
nkeynes@946
  1799
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1800
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1801
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1802
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
nkeynes@975
  1803
        (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };
nkeynes@939
  1804
        
nkeynes@946
  1805
struct mem_region_fn p4_region_storequeue_sqmd_miss = { 
nkeynes@946
  1806
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1807
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1808
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1809
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
nkeynes@975
  1810
        (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write }; 
nkeynes@946
  1811
nkeynes@946
  1812
struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
nkeynes@946
  1813
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1814
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1815
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1816
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
nkeynes@975
  1817
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };
nkeynes@946
  1818
        
nkeynes@946
  1819
struct mem_region_fn p4_region_storequeue_sqmd_protected = {
nkeynes@946
  1820
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1821
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1822
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
nkeynes@946
  1823
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
nkeynes@975
  1824
        (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };
nkeynes@946
  1825