lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 980:deb4361928fe
prev 975:007bf7eb944f
next 1065:bc1cc0c54917
author nkeynes
date Wed Feb 04 21:45:21 2009 +0000
permissions -rw-r--r--
last change Fix signedness in mmu_ext_page_remapped (Thanks kaz!)
Fix fallthrough case in mmu_utlb_entry_for_vpn
Fix signedness warnings in cache.c

/**
 * $Id$
 *
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#include "mem.h"
#include "mmu.h"

#define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
/* Wrapped in do/while(0) so the multi-statement macro expands safely in if/else */
#define RAISE_MEM_ERROR(code, vpn) do { \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code); \
} while(0)
#define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
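
/* IS_1K_PAGE_ENTRY works by pointer arithmetic: subtracting &mmu_utlb_1k_pages[0]
 * from the candidate yields its element index if (and only if) it actually lies
 * within the mmu_utlb_1k_pages array; the unsigned cast makes any pointer below
 * the array wrap to a huge value, so a single compare covers both bounds.
 * Illustrative use (mirrors the lookup code later in this file):
 *
 *     mem_region_fn_t fn = sh4_address_space[vpn>>12];
 *     if( IS_1K_PAGE_ENTRY(fn) )
 *         fn = ((struct utlb_1k_entry *)fn)->subpages[(vpn>>10)&0x03];
 */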

/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;
static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */

/* Module globals */
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid
static struct utlb_default_regions *mmu_user_storequeue_regions;

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;


/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected, int tlb_on );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
static int mmu_read_urc();

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );

#define DEFAULT_REGIONS 0
#define DEFAULT_STOREQUEUE_REGIONS 1
#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2

static struct utlb_default_regions mmu_default_regions[3] = {
        { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
        { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
        { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };

#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])

/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */
void MMU_init()
{
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    mmu_utlb_1k_init();

    /* Ensure the code regions are executable (64-bit only). It might be more
     * portable to mmap these at runtime rather than using static decls.
     */
#if SIZEOF_VOID_P == 8
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
#endif
}
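
/* Both maps are flat tables of mem_region_fn_t, one entry per 4K page of the
 * 32-bit address space, so every access decodes with a single shift and load.
 * Illustrative example: a long read of vaddr 0x8C001000 dispatches through
 * sh4_address_space[0x8C001000 >> 12]->read_long( ... ).
 */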

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    mmu_read_urc();
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_urc_overflow = mmu_urc >= mmu_urb;
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
    return 0;
}

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    int urc = mmu_read_urc();
    if( mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( urc );
    mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
    if( mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( urc );
}


MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
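
/* MMUCR reads are composed on the fly: URC, URB and LRUI live in module
 * globals rather than in the backing register, so they are merged into
 * bits 15:10, 23:18 and 31:26 respectively (the SH4 MMUCR field layout)
 * on each read. E.g. (illustrative) with urc=5, urb=0x20, lrui=0 a read
 * returns stored_bits | (5<<10) | (0x20<<18).
 */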

MMIO_REGION_READ_DEFSUBFNS(MMU)

MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        if( mmu_urb == 0 ) {
            mmu_urb = 0x40;
        } else if( mmu_urc >= mmu_urb ) {
            mmu_urc_overflow = TRUE;
        }
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
            xlat_flush_cache(); // If we're not running, flush the cache anyway
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
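
/* Sketch of the redirection (illustrative): a 4K slot covered by 1K-sized TLB
 * entries holds a utlb_1k_entry instead of a direct region pointer; its vtable
 * forwards each access to one of four subpages selected by address bits 11:10,
 * i.e. subpages[(addr>>10)&0x03]. The rest of the lookup path stays unaware of
 * the split.
 */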
static void mmu_utlb_1k_init()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    }
    mmu_utlb_1k_free_index = 0;
}

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
{
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
    return entry;
}

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
{
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
}


/********************** Address space maintenance *************************/

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static int mmu_read_urc()
{
    if( mmu_urc_overflow ) {
        if( mmu_urc >= 0x40 ) {
            mmu_urc_overflow = FALSE;
            mmu_urc -= 0x40;
            mmu_urc %= mmu_urb;
        }
    } else {
        mmu_urc %= mmu_urb;
    }
    return mmu_urc;
}
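
/* Example of the lazy URC fixup (illustrative): with urb=0x30 the fast-path
 * accessors may push urc well past the replacement boundary, and a read then
 * folds it back, e.g. urc=0x35 % 0x30 -> 5. The overflow flag preserves the
 * behaviour of an URC explicitly written >= URB: it holds its value until it
 * wraps past 0x40.
 */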

static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
{
    unsigned int i;
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        /* TLB on */
        sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
    } else {
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        }
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
        }
    }
    return TRUE;
}

static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr;
    int i;

    /* Reset the storequeue area */

    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );

        /* Default SQ prefetch goes to TLB miss (?) */
        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        mmu_utlb_register_all();
    } else {
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }

        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        if( IS_STOREQUEUE_PROTECTED() ) {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
        } else {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        }
    }
}

/**
 * Flip the SQMD switch - this is rather expensive, so will need to be changed if
 * anything expects to do this frequently.
 */
static void mmu_set_storequeue_protected( int protected, int tlb_on )
{
    mem_region_fn_t nontlb_region;
    int i;

    if( protected ) {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
        nontlb_region = &p4_region_storequeue_sqmd;
    } else {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        nontlb_region = &p4_region_storequeue;
    }

    if( tlb_on ) {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
                mmu_utlb_insert_entry(i);
            }
        }
    } else {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
    }
}

static void mmu_set_tlb_asid( uint32_t asid )
{
    /* Scan for pages that need to be remapped */
    int i;
    if( IS_SV_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].asid == mmu_asid &&
                (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                // Matches old ASID - unmap out
                if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                        get_tlb_size_pages(mmu_utlb[i].flags) ) )
                    mmu_utlb_remap_pages( FALSE, TRUE, i );
            }
        }
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].asid == asid &&
                (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                // Matches new ASID - map in
                mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                        mmu_utlb[i].vpn&mmu_utlb[i].mask,
                        get_tlb_size_pages(mmu_utlb[i].flags) );
            }
        }
    } else {
        // Remap both Priv+user pages
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].asid == mmu_asid &&
                (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                        get_tlb_size_pages(mmu_utlb[i].flags) ) )
                    mmu_utlb_remap_pages( TRUE, TRUE, i );
            }
        }
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].asid == asid &&
                (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                        mmu_utlb[i].vpn&mmu_utlb[i].mask,
                        get_tlb_size_pages(mmu_utlb[i].flags) );
            }
        }
    }

    mmu_asid = asid;
}
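
/* Summary of the two scans above (illustrative): an ASID switch first evicts
 * entries visible only under the old ASID, then maps in entries owned by the
 * new one. TLB_SHARE entries are untouched, and with SV enabled only the
 * user-mode half of the tables is rewritten - privileged mappings remain
 * valid across the switch.
 */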

static uint32_t get_tlb_size_mask( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
static uint32_t get_tlb_size_pages( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
    }
}
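
/* Page counts are at the native 4K granularity (follows directly from the
 * cases above): 1K entries return 0 and are special-cased by the map/unmap
 * code, a 4K entry covers 1 slot, 64K covers 16, and 1M covers 256.
 */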

/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) {
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == privdefs->tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = privdefs->tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = userdefs->tlb_multihit;
            }
        }

    } else {
        if( priv_page != NULL ) {
            /* Privileged mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_miss ) {
                    *ptr++ = priv_page;
                } else {
                    mapping_ok = FALSE;
                    *ptr++ = privdefs->tlb_multihit;
                }
            }
        }
        if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = userdefs->tlb_multihit;
                }
            }
        }
    }

    return mapping_ok;
}
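
/* Multi-hit example (illustrative): if two valid UTLB entries both cover
 * vpn 0x0C001000, the shared slots are pointed at the tlb_multihit region
 * and FALSE is returned; the guest then only takes a multi-hit exception
 * if it actually touches the overlapping range.
 */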

/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
{
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
    int i;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
            uint32_t start_addr;
            int npages;

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
            } else {
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
            }

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
            }

        }
    }
}

/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE;
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == privdefs->tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        if( unmap_priv )
            ent->subpages[idx] = privdefs->tlb_miss;
        if( unmap_user )
            ent->user_subpages[idx] = userdefs->tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else {
        if( unmap_priv ) {
            /* Privileged (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = privdefs->tlb_miss;
            }
        }
        if( unmap_user ) {
            /* User (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *uptr++ = userdefs->tlb_miss;
            }
        }
    }

    return unmapping_ok;
}

static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Store queue mappings are a bit different - normal access is fixed to
         * the store queue register block, and we only map prefetches through
         * the TLB
         */
        mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = mmu_user_storequeue_regions->tlb_prot;
        } else if( IS_STOREQUEUE_PROTECTED() ) {
            upage = &p4_region_storequeue_sqmd;
        } else {
            upage = page;
        }

    }  else {

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = &mem_region_tlb_protected;
        } else {
            upage = page;
        }

        if( (ent->flags & TLB_WRITABLE) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_protected_write;
            page->write_word = (mem_write_fn_t)tlb_protected_write;
            page->write_byte = (mem_write_fn_t)tlb_protected_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else if( (ent->flags & TLB_DIRTY) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_initial_write;
            page->write_word = (mem_write_fn_t)tlb_initial_write;
            page->write_byte = (mem_write_fn_t)tlb_initial_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else {
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
        }
    }

    mmu_utlb_pages[entry].user_fn = upage;

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}
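
/* The write-protection trick above (sketch): read-only pages get
 * tlb_protected_write handlers, and clean pages (TLB_DIRTY clear) get
 * tlb_initial_write handlers, so the first store through such a page traps
 * and can raise the protection violation / initial page write exception
 * without any per-access flag checks on the fast path.
 */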

static void mmu_utlb_remove_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    gboolean unmap_user;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        unmap_user = TRUE;
    } else if( IS_SV_ENABLED() ) {
        unmap_user = FALSE;
    } else {
        return; // Not mapped
    }

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

    if( !clean_unmap ) {
        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
    }
}

static void mmu_utlb_register_all()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
            }
        }
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
{
    mmu_urc++;
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
        fn = ent->subpages[(vpn>>10)&0x03];
        if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
            return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
        }
    }
    if( fn == &mem_region_tlb_multihit ) {
        return -2;
    } else {
        return -1;
    }
}
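
/* Reverse lookup is pure pointer arithmetic (illustrative): each UTLB entry
 * owns exactly one utlb_page_entry in mmu_utlb_pages[], so subtracting the
 * array base from the region pointer recovers the entry number, e.g.
 *
 *     int entryNo = mmu_utlb_entry_for_vpn( vpn );
 *     if( entryNo >= 0 )               // -1 = miss, -2 = multi-hit
 *         ppn = mmu_utlb[entryNo].ppn;
 */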


/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
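
/* With sh4_icache filled in, instruction fetch can be served without
 * re-translating every pc. A minimal sketch of that fast path, assuming
 * sh4_icache.page points at host memory for the whole page (the helper
 * name is hypothetical, not part of this file):
 */
#if 0
static inline unsigned char *icache_host_ptr( sh4vma_t pc )
{
    if( (pc & sh4_icache.mask) == sh4_icache.page_vma ) {
        /* Hit: offset within the cached page */
        return sh4_icache.page + (pc & ~sh4_icache.mask);
    }
    return NULL; /* Miss: caller must run mmu_update_icache(pc) first */
}
#endif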

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence it should only be used immediately prior to executing code), and
 * otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
        return FALSE;
    case -2:
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
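
/* Callers should treat the gboolean result as "safe to continue": TRUE
 * covers both a usable icache and an invalidated one (page_vma == -1), so
 * the fetch path must still test for a hit afterwards; FALSE means an
 * exception is already pending and the fetch must be abandoned.
 */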

/**
 * Translate an address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions and ignores protection bits, although
 * the underlying ITLB lookup may still update the LRU/ITLB state. Returns
 * the translated address, or MMU_VMA_ERROR on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
               (vma & (~mmu_itlb[entryNo].mask));
    }
}
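
/* The retry above is deliberate: a -2 (multi-hit) from the vpn-only lookup
 * can still resolve to a unique entry once the current ASID is considered,
 * so the asid-qualified lookup gets a second chance before the address is
 * reported as untranslatable.
 */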

/********************** TLB Direct-Access Regions ***************************/
#ifdef HAVE_FRAME_ADDRESS
#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; } while(0)
#else
#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
#endif
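
/* The HAVE_FRAME_ADDRESS variant patches the saved return address on the
 * stack: `exc` is the exception-exit pointer passed into each handler below,
 * and writing it one word above __builtin_frame_address(0) makes the handler
 * "return" straight into the translator's exception epilogue. This depends
 * on the conventional saved-FP/return-address frame layout (and on an `exc`
 * variable being in scope), hence the portable fallback that simply requests
 * CORE_EXIT_EXCEPTION.
 */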
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
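
/* ITLB address/data array accesses select the entry from address bits [8:7].
 * Worked example: an access at 0xF2000100 yields (0x100 >> 7) & 3 == 2,
 * i.e. mmu_itlb[2].
 */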

int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
nkeynes@586
  1216
nkeynes@953
  1217
void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
nkeynes@953
  1218
{
nkeynes@953
  1219
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@953
  1220
    ent->vpn = val & 0xFFFFFC00;
nkeynes@953
  1221
    ent->asid = val & 0x000000FF;
nkeynes@953
  1222
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
nkeynes@953
  1223
}
nkeynes@953
  1224
nkeynes@953
  1225
int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
nkeynes@953
  1226
{
nkeynes@953
  1227
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@953
  1228
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
nkeynes@953
  1229
}
nkeynes@953
  1230
nkeynes@953
  1231
void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
nkeynes@953
  1232
{
nkeynes@953
  1233
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
nkeynes@953
  1234
    ent->ppn = val & 0x1FFFFC00;
nkeynes@953
  1235
    ent->flags = val & 0x00001DA;
nkeynes@953
  1236
    ent->mask = get_tlb_size_mask(val);
nkeynes@953
  1237
    if( ent->ppn >= 0x1C000000 )
nkeynes@953
  1238
        ent->ppn |= 0xE0000000;
nkeynes@953
  1239
}
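
/* The final fixup above handles physical pages in 0x1C000000-0x1FFFFFFF
 * (area 7, the on-chip control-register space): OR-ing in 0xE0000000 folds
 * them up to 0xFC000000+, where this module's address decoder expects the
 * P4 control region to live.
 */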

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
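
/* UTLB array decoding, for comparison: bits [13:8] select one of the 64
 * entries, bit 7 requests an associative write, and bit 23 selects the
 * second data word (the PCMCIA assist bits). Worked example: an access at
 * 0xF6000380 decodes to entry (0x380 >> 8) & 0x3F == 3 with the associative
 * bit set.
 */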

int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
           ((ent->flags & TLB_DIRTY)<<7);
}
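
/* This reassembles the architected UTLB address-array word: VPN in bits
 * [31:10], the dirty bit at bit 9 (hence TLB_DIRTY shifted up by 7), the
 * valid bit at bit 8, and the ASID in bits [7:0].
 */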
int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - matches valid entries
 * on vpn/asid in the same way as the normal lookup, but has no side effects
 * beyond logging multi-hits.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - matches valid entries
 * in the same way as mmu_utlb_lookup_assoc above.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
                if( old_flags & TLB_VALID )
                    mmu_utlb_remove_entry( utlb );
                if( ent->flags & TLB_VALID )
                    mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            RAISE_TLB_MULTIHIT_ERROR(addr);
            EXCEPTION_EXIT();
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
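
/* Associative writes touch both TLBs in one store: the V and D bits of any
 * matching UTLB entry and the V bit of any matching ITLB entry are updated
 * together, which is why the ITLB scan runs even after a UTLB match. The
 * `(val & 0x200) >> 7` term moves the architected D bit (bit 9 of the
 * written word) down into the internal TLB_DIRTY flag position.
 */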

void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_tlb_size_mask(val);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

struct mem_region_fn p4_region_itlb_addr = {
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_itlb_addr_read };
struct mem_region_fn p4_region_itlb_data = {
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_itlb_data_read };
struct mem_region_fn p4_region_utlb_addr = {
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_utlb_addr_read };
struct mem_region_fn p4_region_utlb_data = {
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_utlb_data_read };
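
/* Slot order in these initializers, as implied by the usage below: three
 * read/write pairs for the sized accesses (long first), then the burst
 * read/write pair, then prefetch, and finally a read-for-write hook used to
 * probe an address on behalf of a store (the *_for_write handlers below are
 * written for that slot).
 */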

/********************** Error regions **************************/

static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
    EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    EXCEPTION_EXIT();
    return 0;
}

static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
    EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    EXCEPTION_EXIT();
    return 0;
}

static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    EXCEPTION_EXIT();
}
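
/* The mmu_urc++ in the miss/protection handlers keeps the URC replacement
 * counter ticking on the faulting access, as it would in hardware; the
 * multi-hit handlers skip it since a multi-hit is modelled as a reset
 * condition rather than a restartable exception.
 */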

/**
 * Note: Per sec 4.6.4 of the SH7750 manual, SQ
 */
struct mem_region_fn mem_region_address_error = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn mem_region_tlb_miss = {
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
        unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };

struct mem_region_fn mem_region_tlb_protected = {
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
        unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };

struct mem_region_fn mem_region_tlb_multihit = {
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };

/* Store-queue regions */
/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
 * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
 * some cases), in contrast to the ordinary fields above.
 *
 * There is probably a simpler way to do this.
 */
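
/* The pairing appears to work out as follows: the plain p4_region_storequeue
 * variants accept SQ data accesses and differ only in what the prefetch slot
 * does (nothing special, TLB miss, multi-hit, or protection fault), while
 * the _sqmd variants model the SQMD-protected case where every user-mode
 * data access is an address error and only the prefetch outcome still
 * follows the TLB.
 */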
struct mem_region_fn p4_region_storequeue = { 
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        ccn_storequeue_prefetch, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_miss = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_multihit = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_protected = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_sqmd = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_miss = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_protected = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };