Search
lxdream.org :: lxdream/src/sh4/mmu.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/mmu.c
changeset 561:533f6b478071
prev559:06714bc64271
next569:a1c49e1e8776
author nkeynes
date Tue Jan 01 05:08:38 2008 +0000 (16 years ago)
branchlxdream-mmu
permissions -rw-r--r--
last change Enable Id keyword on all source files
file annotate diff log raw
nkeynes@550
     1
/**
nkeynes@561
     2
 * $Id$
nkeynes@550
     3
 * 
nkeynes@550
     4
 * MMU implementation
nkeynes@550
     5
 *
nkeynes@550
     6
 * Copyright (c) 2005 Nathan Keynes.
nkeynes@550
     7
 *
nkeynes@550
     8
 * This program is free software; you can redistribute it and/or modify
nkeynes@550
     9
 * it under the terms of the GNU General Public License as published by
nkeynes@550
    10
 * the Free Software Foundation; either version 2 of the License, or
nkeynes@550
    11
 * (at your option) any later version.
nkeynes@550
    12
 *
nkeynes@550
    13
 * This program is distributed in the hope that it will be useful,
nkeynes@550
    14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
nkeynes@550
    15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
nkeynes@550
    16
 * GNU General Public License for more details.
nkeynes@550
    17
 */
nkeynes@550
    18
#define MODULE sh4_module
nkeynes@550
    19
nkeynes@550
    20
#include <stdio.h>
nkeynes@550
    21
#include "sh4/sh4mmio.h"
nkeynes@550
    22
#include "sh4/sh4core.h"
nkeynes@550
    23
#include "mem.h"
nkeynes@550
    24
nkeynes@550
    25
#define OCRAM_START (0x1C000000>>PAGE_BITS)
nkeynes@550
    26
#define OCRAM_END   (0x20000000>>PAGE_BITS)
nkeynes@550
    27
nkeynes@550
    28
#define ITLB_ENTRY_COUNT 4
nkeynes@550
    29
#define UTLB_ENTRY_COUNT 64
nkeynes@550
    30
nkeynes@550
    31
/* Entry address */
nkeynes@550
    32
#define TLB_VALID     0x00000100
nkeynes@550
    33
#define TLB_USERMODE  0x00000040
nkeynes@550
    34
#define TLB_WRITABLE  0x00000020
nkeynes@559
    35
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
nkeynes@550
    36
#define TLB_SIZE_MASK 0x00000090
nkeynes@550
    37
#define TLB_SIZE_1K   0x00000000
nkeynes@550
    38
#define TLB_SIZE_4K   0x00000010
nkeynes@550
    39
#define TLB_SIZE_64K  0x00000080
nkeynes@550
    40
#define TLB_SIZE_1M   0x00000090
nkeynes@550
    41
#define TLB_CACHEABLE 0x00000008
nkeynes@550
    42
#define TLB_DIRTY     0x00000004
nkeynes@550
    43
#define TLB_SHARE     0x00000002
nkeynes@550
    44
#define TLB_WRITETHRU 0x00000001
nkeynes@550
    45
nkeynes@559
    46
#define MASK_1K  0xFFFFFC00
nkeynes@559
    47
#define MASK_4K  0xFFFFF000
nkeynes@559
    48
#define MASK_64K 0xFFFF0000
nkeynes@559
    49
#define MASK_1M  0xFFF00000
nkeynes@550
    50
nkeynes@550
    51
struct itlb_entry {
nkeynes@550
    52
    sh4addr_t vpn; // Virtual Page Number
nkeynes@550
    53
    uint32_t asid; // Process ID
nkeynes@559
    54
    uint32_t mask;
nkeynes@550
    55
    sh4addr_t ppn; // Physical Page Number
nkeynes@550
    56
    uint32_t flags;
nkeynes@550
    57
};
nkeynes@550
    58
nkeynes@550
    59
struct utlb_entry {
nkeynes@550
    60
    sh4addr_t vpn; // Virtual Page Number
nkeynes@559
    61
    uint32_t mask; // Page size mask
nkeynes@550
    62
    uint32_t asid; // Process ID
nkeynes@550
    63
    sh4addr_t ppn; // Physical Page Number
nkeynes@550
    64
    uint32_t flags;
nkeynes@550
    65
    uint32_t pcmcia; // extra pcmcia data - not used
nkeynes@550
    66
};
nkeynes@550
    67
nkeynes@550
    68
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
nkeynes@550
    69
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
nkeynes@550
    70
static uint32_t mmu_urc;
nkeynes@550
    71
static uint32_t mmu_urb;
nkeynes@550
    72
static uint32_t mmu_lrui;
nkeynes@550
    73
nkeynes@550
    74
static sh4ptr_t cache = NULL;
nkeynes@550
    75
nkeynes@550
    76
static void mmu_invalidate_tlb();
nkeynes@550
    77
nkeynes@550
    78
nkeynes@559
    79
static uint32_t get_mask_for_flags( uint32_t flags )
nkeynes@559
    80
{
nkeynes@559
    81
    switch( flags & TLB_SIZE_MASK ) {
nkeynes@559
    82
    case TLB_SIZE_1K: return MASK_1K;
nkeynes@559
    83
    case TLB_SIZE_4K: return MASK_4K;
nkeynes@559
    84
    case TLB_SIZE_64K: return MASK_64K;
nkeynes@559
    85
    case TLB_SIZE_1M: return MASK_1M;
nkeynes@559
    86
    }
nkeynes@559
    87
}
nkeynes@559
    88
nkeynes@550
    89
int32_t mmio_region_MMU_read( uint32_t reg )
nkeynes@550
    90
{
nkeynes@550
    91
    switch( reg ) {
nkeynes@550
    92
    case MMUCR:
nkeynes@550
    93
	return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
nkeynes@550
    94
    default:
nkeynes@550
    95
	return MMIO_READ( MMU, reg );
nkeynes@550
    96
    }
nkeynes@550
    97
}
nkeynes@550
    98
nkeynes@550
    99
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
nkeynes@550
   100
{
nkeynes@550
   101
    switch(reg) {
nkeynes@550
   102
    case PTEH:
nkeynes@550
   103
	val &= 0xFFFFFCFF;
nkeynes@550
   104
	break;
nkeynes@550
   105
    case PTEL:
nkeynes@550
   106
	val &= 0x1FFFFDFF;
nkeynes@550
   107
	break;
nkeynes@550
   108
    case PTEA:
nkeynes@550
   109
	val &= 0x0000000F;
nkeynes@550
   110
	break;
nkeynes@550
   111
    case MMUCR:
nkeynes@550
   112
	if( val & MMUCR_TI ) {
nkeynes@550
   113
	    mmu_invalidate_tlb();
nkeynes@550
   114
	}
nkeynes@550
   115
	mmu_urc = (val >> 10) & 0x3F;
nkeynes@550
   116
	mmu_urb = (val >> 18) & 0x3F;
nkeynes@550
   117
	mmu_lrui = (val >> 26) & 0x3F;
nkeynes@550
   118
	val &= 0x00000301;
nkeynes@550
   119
	break;
nkeynes@550
   120
    case CCR:
nkeynes@550
   121
	mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
nkeynes@550
   122
	break;
nkeynes@550
   123
    default:
nkeynes@550
   124
	break;
nkeynes@550
   125
    }
nkeynes@550
   126
    MMIO_WRITE( MMU, reg, val );
nkeynes@550
   127
}
nkeynes@550
   128
nkeynes@550
   129
nkeynes@550
   130
void MMU_init() 
nkeynes@550
   131
{
nkeynes@550
   132
    cache = mem_alloc_pages(2);
nkeynes@550
   133
}
nkeynes@550
   134
nkeynes@550
   135
void MMU_reset()
nkeynes@550
   136
{
nkeynes@550
   137
    mmio_region_MMU_write( CCR, 0 );
nkeynes@550
   138
}
nkeynes@550
   139
nkeynes@550
   140
void MMU_save_state( FILE *f )
nkeynes@550
   141
{
nkeynes@550
   142
    fwrite( cache, 4096, 2, f );
nkeynes@550
   143
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
nkeynes@550
   144
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
nkeynes@559
   145
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
nkeynes@559
   146
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
nkeynes@559
   147
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
nkeynes@550
   148
}
nkeynes@550
   149
nkeynes@550
   150
int MMU_load_state( FILE *f )
nkeynes@550
   151
{
nkeynes@550
   152
    /* Setup the cache mode according to the saved register value
nkeynes@550
   153
     * (mem_load runs before this point to load all MMIO data)
nkeynes@550
   154
     */
nkeynes@550
   155
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
nkeynes@550
   156
    if( fread( cache, 4096, 2, f ) != 2 ) {
nkeynes@550
   157
	return 1;
nkeynes@550
   158
    }
nkeynes@550
   159
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
nkeynes@550
   160
	return 1;
nkeynes@550
   161
    }
nkeynes@550
   162
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
nkeynes@550
   163
	return 1;
nkeynes@550
   164
    }
nkeynes@559
   165
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
nkeynes@559
   166
	return 1;
nkeynes@559
   167
    }
nkeynes@559
   168
    if( fread( &mmu_urc, sizeof(mmu_urb), 1, f ) != 1 ) {
nkeynes@559
   169
	return 1;
nkeynes@559
   170
    }
nkeynes@559
   171
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
nkeynes@559
   172
	return 1;
nkeynes@559
   173
    }
nkeynes@550
   174
    return 0;
nkeynes@550
   175
}
nkeynes@550
   176
nkeynes@550
   177
void mmu_set_cache_mode( int mode )
nkeynes@550
   178
{
nkeynes@550
   179
    uint32_t i;
nkeynes@550
   180
    switch( mode ) {
nkeynes@550
   181
        case MEM_OC_INDEX0: /* OIX=0 */
nkeynes@550
   182
            for( i=OCRAM_START; i<OCRAM_END; i++ )
nkeynes@550
   183
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
nkeynes@550
   184
            break;
nkeynes@550
   185
        case MEM_OC_INDEX1: /* OIX=1 */
nkeynes@550
   186
            for( i=OCRAM_START; i<OCRAM_END; i++ )
nkeynes@550
   187
                page_map[i] = cache + ((i&0x02000000)>>(25-PAGE_BITS));
nkeynes@550
   188
            break;
nkeynes@550
   189
        default: /* disabled */
nkeynes@550
   190
            for( i=OCRAM_START; i<OCRAM_END; i++ )
nkeynes@550
   191
                page_map[i] = NULL;
nkeynes@550
   192
            break;
nkeynes@550
   193
    }
nkeynes@550
   194
}
nkeynes@550
   195
nkeynes@550
   196
/* TLB maintanence */
nkeynes@550
   197
nkeynes@550
   198
/**
nkeynes@550
   199
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
nkeynes@550
   200
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
nkeynes@550
   201
 */
nkeynes@550
   202
void MMU_ldtlb()
nkeynes@550
   203
{
nkeynes@550
   204
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
nkeynes@550
   205
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
nkeynes@550
   206
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
nkeynes@550
   207
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
nkeynes@550
   208
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
nkeynes@559
   209
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
nkeynes@550
   210
}
nkeynes@550
   211
nkeynes@559
   212
static inline void mmu_flush_pages( struct utlb_entry *ent )
nkeynes@550
   213
{
nkeynes@559
   214
    unsigned int vpn;
nkeynes@559
   215
    switch( ent->flags & TLB_SIZE_MASK ) {
nkeynes@559
   216
    case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
nkeynes@559
   217
    case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
nkeynes@559
   218
    case TLB_SIZE_64K: 
nkeynes@559
   219
	for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
nkeynes@559
   220
	    xlat_flush_page( vpn );
nkeynes@559
   221
	}
nkeynes@559
   222
	break;
nkeynes@559
   223
    case TLB_SIZE_1M:
nkeynes@559
   224
	for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
nkeynes@559
   225
	    xlat_flush_page( vpn );
nkeynes@559
   226
	}
nkeynes@559
   227
	break;
nkeynes@559
   228
    }
nkeynes@559
   229
}
nkeynes@559
   230
nkeynes@559
   231
/**
nkeynes@559
   232
 * The translations are excessively complicated, but unfortunately it's a 
nkeynes@559
   233
 * complicated system. It can undoubtedly be better optimized too.
nkeynes@559
   234
 */
nkeynes@559
   235
nkeynes@559
   236
/**
nkeynes@559
   237
 * Perform the actual utlb lookup.
nkeynes@559
   238
 * Possible outcomes are:
nkeynes@559
   239
 *   0..63 Single match - good, return entry found
nkeynes@559
   240
 *   -1 No match - raise a tlb data miss exception
nkeynes@559
   241
 *   -2 Multiple matches - raise a multi-hit exception (reset)
nkeynes@559
   242
 * @param vpn virtual address to resolve
nkeynes@559
   243
 * @param asid Address space identifier
nkeynes@559
   244
 * @param use_asid whether to require an asid match on non-shared pages.
nkeynes@559
   245
 * @return the resultant UTLB entry, or an error.
nkeynes@559
   246
 */
nkeynes@559
   247
static inline int mmu_utlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
nkeynes@559
   248
{
nkeynes@559
   249
    int result = -1;
nkeynes@559
   250
    unsigned int i;
nkeynes@559
   251
nkeynes@559
   252
    mmu_urc++;
nkeynes@559
   253
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
nkeynes@559
   254
	mmu_urc = 0;
nkeynes@559
   255
    }
nkeynes@559
   256
nkeynes@559
   257
    if( use_asid ) {
nkeynes@559
   258
	for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
nkeynes@559
   259
	    if( (mmu_utlb[i].flags & TLB_VALID) &&
nkeynes@559
   260
	        ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) && 
nkeynes@559
   261
		((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
nkeynes@559
   262
		if( result != -1 ) {
nkeynes@559
   263
		    return -2;
nkeynes@559
   264
		}
nkeynes@559
   265
		result = i;
nkeynes@550
   266
	    }
nkeynes@550
   267
	}
nkeynes@550
   268
    } else {
nkeynes@559
   269
	for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
nkeynes@559
   270
	    if( (mmu_utlb[i].flags & TLB_VALID) &&
nkeynes@559
   271
		((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
nkeynes@559
   272
		if( result != -1 ) {
nkeynes@559
   273
		    return -2;
nkeynes@559
   274
		}
nkeynes@559
   275
		result = i;
nkeynes@550
   276
	    }
nkeynes@550
   277
	}
nkeynes@550
   278
    }
nkeynes@559
   279
    return result;
nkeynes@559
   280
}
nkeynes@559
   281
nkeynes@559
   282
/**
nkeynes@559
   283
 * Find a UTLB entry for the associative TLB write - same as the normal
nkeynes@559
   284
 * lookup but ignores the valid bit.
nkeynes@559
   285
 */
nkeynes@559
   286
static inline mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
nkeynes@559
   287
{
nkeynes@559
   288
    int result = -1;
nkeynes@559
   289
    unsigned int i;
nkeynes@559
   290
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
nkeynes@559
   291
	if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) && 
nkeynes@559
   292
	    ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
nkeynes@559
   293
	    if( result != -1 ) {
nkeynes@559
   294
		return -2;
nkeynes@559
   295
	    }
nkeynes@559
   296
	    result = i;
nkeynes@559
   297
	}
nkeynes@559
   298
    }
nkeynes@559
   299
    return result;
nkeynes@559
   300
}
nkeynes@559
   301
nkeynes@559
   302
/**
nkeynes@559
   303
 * Perform the actual itlb lookup.
nkeynes@559
   304
 * Possible outcomes are:
nkeynes@559
   305
 *   0..63 Single match - good, return entry found
nkeynes@559
   306
 *   -1 No match - raise a tlb data miss exception
nkeynes@559
   307
 *   -2 Multiple matches - raise a multi-hit exception (reset)
nkeynes@559
   308
 * @param vpn virtual address to resolve
nkeynes@559
   309
 * @param asid Address space identifier
nkeynes@559
   310
 * @param use_asid whether to require an asid match on non-shared pages.
nkeynes@559
   311
 * @return the resultant ITLB entry, or an error.
nkeynes@559
   312
 */
nkeynes@559
   313
static inline int mmu_itlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
nkeynes@559
   314
{
nkeynes@559
   315
    int result = -1;
nkeynes@559
   316
    unsigned int i;
nkeynes@559
   317
    if( use_asid ) {
nkeynes@559
   318
	for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
nkeynes@559
   319
	    if( (mmu_itlb[i].flags & TLB_VALID) &&
nkeynes@559
   320
	        ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) && 
nkeynes@559
   321
		((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
nkeynes@559
   322
		if( result != -1 ) {
nkeynes@559
   323
		    return -2;
nkeynes@559
   324
		}
nkeynes@559
   325
		result = i;
nkeynes@559
   326
	    }
nkeynes@559
   327
	}
nkeynes@559
   328
    } else {
nkeynes@559
   329
	for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
nkeynes@559
   330
	    if( (mmu_itlb[i].flags & TLB_VALID) &&
nkeynes@559
   331
		((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
nkeynes@559
   332
		if( result != -1 ) {
nkeynes@559
   333
		    return -2;
nkeynes@559
   334
		}
nkeynes@559
   335
		result = i;
nkeynes@559
   336
	    }
nkeynes@559
   337
	}
nkeynes@559
   338
    }
nkeynes@559
   339
nkeynes@559
   340
    switch( result ) {
nkeynes@559
   341
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
nkeynes@559
   342
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
nkeynes@559
   343
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
nkeynes@559
   344
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
nkeynes@559
   345
    }
nkeynes@559
   346
	
nkeynes@559
   347
    return result;
nkeynes@559
   348
}
nkeynes@559
   349
nkeynes@559
   350
static int inline mmu_itlb_update_from_utlb( int entryNo )
nkeynes@559
   351
{
nkeynes@559
   352
    int replace;
nkeynes@559
   353
    /* Determine entry to replace based on lrui */
nkeynes@559
   354
    if( mmu_lrui & 0x38 == 0x38 ) {
nkeynes@559
   355
	replace = 0;
nkeynes@559
   356
	mmu_lrui = mmu_lrui & 0x07;
nkeynes@559
   357
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
nkeynes@559
   358
	replace = 1;
nkeynes@559
   359
	mmu_lrui = (mmu_lrui & 0x19) | 0x20;
nkeynes@559
   360
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
nkeynes@559
   361
	replace = 2;
nkeynes@559
   362
	mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
nkeynes@559
   363
    } else { // Note - gets invalid entries too
nkeynes@559
   364
	replace = 3;
nkeynes@559
   365
	mmu_lrui = (mmu_lrui | 0x0B);
nkeynes@559
   366
    } 
nkeynes@559
   367
nkeynes@559
   368
    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
nkeynes@559
   369
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
nkeynes@559
   370
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
nkeynes@559
   371
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
nkeynes@559
   372
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
nkeynes@559
   373
    return replace;
nkeynes@559
   374
}
nkeynes@559
   375
nkeynes@559
   376
/**
nkeynes@559
   377
 * Find a ITLB entry for the associative TLB write - same as the normal
nkeynes@559
   378
 * lookup but ignores the valid bit.
nkeynes@559
   379
 */
nkeynes@559
   380
static inline mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
nkeynes@559
   381
{
nkeynes@559
   382
    int result = -1;
nkeynes@559
   383
    unsigned int i;
nkeynes@559
   384
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
nkeynes@559
   385
	if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) && 
nkeynes@559
   386
	    ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
nkeynes@559
   387
	    if( result != -1 ) {
nkeynes@559
   388
		return -2;
nkeynes@559
   389
	    }
nkeynes@559
   390
	    result = i;
nkeynes@559
   391
	}
nkeynes@559
   392
    }
nkeynes@559
   393
    return result;
nkeynes@559
   394
}
nkeynes@559
   395
nkeynes@559
   396
#define RAISE_TLB_ERROR(code, vpn) \
nkeynes@559
   397
    MMIO_WRITE(MMU, TEA, vpn); \
nkeynes@559
   398
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
nkeynes@559
   399
    sh4_raise_tlb_exception(code); \
nkeynes@559
   400
    return (((uint64_t)code)<<32)
nkeynes@559
   401
nkeynes@559
   402
#define RAISE_MEM_ERROR(code, vpn) \
nkeynes@559
   403
    MMIO_WRITE(MMU, TEA, vpn); \
nkeynes@559
   404
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
nkeynes@559
   405
    sh4_raise_exception(code); \
nkeynes@559
   406
    return (((uint64_t)code)<<32)
nkeynes@559
   407
nkeynes@559
   408
#define RAISE_OTHER_ERROR(code) \
nkeynes@559
   409
    sh4_raise_exception(code); \
nkeynes@559
   410
    return (((uint64_t)EXV_EXCEPTION)<<32)
nkeynes@559
   411
nkeynes@559
   412
/**
nkeynes@559
   413
 * Abort with a non-MMU address error. Caused by user-mode code attempting
nkeynes@559
   414
 * to access privileged regions, or alignment faults.
nkeynes@559
   415
 */
nkeynes@559
   416
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
nkeynes@559
   417
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
nkeynes@559
   418
nkeynes@559
   419
#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
nkeynes@559
   420
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
nkeynes@559
   421
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
nkeynes@559
   422
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
nkeynes@559
   423
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
nkeynes@559
   424
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
nkeynes@559
   425
    MMIO_WRITE(MMU, TEA, vpn); \
nkeynes@559
   426
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
nkeynes@559
   427
    return (((uint64_t)EXC_TLB_MULTI_HIT)<<32)
nkeynes@559
   428
nkeynes@559
   429
/**
 * Translate a virtual address for a data write.
 * Returns the physical address in the low 32 bits on success; the
 * MMU_* error macros used below write TEA/PTEH, raise the exception
 * and return with the exception code in the high 32 bits.
 * NOTE(review): restyling deliberately avoided - the error macros
 * embed 'return' statements, so statement order is significant.
 */
uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
	if( IS_SH4_PRIVMODE() ) {
	    if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
		/* P1, P2 and P4 regions are pass-through (no translation) */
		return (uint64_t)addr;
	    }
	} else {
	    if( addr >= 0xE0000000 && addr < 0xE4000000 &&
		((mmucr&MMUCR_SQMD) == 0) ) {
		/* Conditional user-mode access to the store-queue (no translation) */
		return (uint64_t)addr;
	    }
	    /* User-mode access to a privileged region: address error (macro returns) */
	    MMU_WRITE_ADDR_ERROR();
	}
    }
    
    if( (mmucr & MMUCR_AT) == 0 ) {
	/* Address translation disabled - pass through */
	return (uint64_t)addr;
    }

    /* If we get this far, translation is required */

    /* ASID matching is skipped in privileged mode when MMUCR.SV is set */
    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
    
    int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );

    switch(entryNo) {
    case -1:
	MMU_TLB_WRITE_MISS_ERROR(addr);
	break;
    case -2:
	MMU_TLB_MULTI_HIT_ERROR(addr);
	break;
    default:
	/* Privileged mode needs PR write permission; user mode needs both
	 * the user and write bits */
	if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
	    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
	    /* protection violation */
	    MMU_TLB_WRITE_PROT_ERROR(addr);
	}

	/* First write to a clean page raises the initial-write exception */
	if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
	    MMU_TLB_INITIAL_WRITE_ERROR(addr);
	}

	/* finally generate the target address */
	return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) | 
	    (addr & (~mmu_utlb[entryNo].mask));
    }
    /* Not reached: both error cases return via their macros */
    return -1;
}
nkeynes@559
   484
nkeynes@559
   485
/**
 * Translate a virtual address for instruction fetch. Consults the ITLB
 * first, refilling it from the UTLB on a miss. On success the physical
 * address is returned in the low 32 bits; the MMU_* error macros write
 * TEA/PTEH, raise the exception and return the code in the high 32 bits.
 * NOTE(review): restyling deliberately avoided - the error macros embed
 * 'return' statements.
 */
uint64_t mmu_vma_to_phys_exec( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
	if( IS_SH4_PRIVMODE()  ) {
	    if( addr < 0xC0000000 ) {
		/* P1 and P2 regions are pass-through (no translation) */
		return (uint64_t)addr;
	    } else if( addr >= 0xE0000000 ) {
		/* Instruction fetch from P4 is an address error */
		MMU_READ_ADDR_ERROR();
	    }
	} else {
	    /* User-mode fetch from any privileged region */
	    MMU_READ_ADDR_ERROR();
	}
    }
    
    if( (mmucr & MMUCR_AT) == 0 ) {
	/* Address translation disabled - pass through */
	return (uint64_t)addr;
    }

    /* If we get this far, translation is required */
    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
    
    int entryNo = mmu_itlb_lookup_vpn( addr, asid, use_asid );
    if( entryNo == -1 ) {
	/* ITLB miss: try the UTLB and refill the ITLB on a hit */
	entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
	if( entryNo >= 0 ) {
	    entryNo = mmu_itlb_update_from_utlb( entryNo );
	}
    }
    switch(entryNo) {
    case -1:
	MMU_TLB_READ_MISS_ERROR(addr);
	break;
    case -2:
	MMU_TLB_MULTI_HIT_ERROR(addr);
	break;
    default:
	/* User-mode fetch requires the user-accessible bit */
	if( (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 &&
	    !IS_SH4_PRIVMODE() ) {
	    /* protection violation */
	    MMU_TLB_READ_PROT_ERROR(addr);
	}

	/* finally generate the target address */
	return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) | 
	    (addr & (~mmu_itlb[entryNo].mask));
    }
    /* Not reached: both error cases return via their macros */
    return -1;
}
nkeynes@559
   536
nkeynes@559
   537
/**
 * Translate for read without raising exceptions - NOT IMPLEMENTED.
 * NOTE(review): the body is empty, so control falls off the end of a
 * non-void function; using the result is undefined behaviour. Confirm
 * the intended no-exception semantics before wiring this up.
 */
uint64_t mmu_vma_to_phys_read_noexc( sh4addr_t addr ) {


}
nkeynes@559
   541
nkeynes@559
   542
nkeynes@559
   543
/**
 * Translate a virtual address for a data read. On success the physical
 * address is returned in the low 32 bits; the MMU_* error macros write
 * TEA/PTEH, raise the exception and return the code in the high 32 bits.
 * NOTE(review): restyling deliberately avoided - the error macros embed
 * 'return' statements.
 */
uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
	if( IS_SH4_PRIVMODE() ) {
	    if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
		/* P1, P2 and P4 regions are pass-through (no translation) */
		return (uint64_t)addr;
	    }
	} else {
	    if( addr >= 0xE0000000 && addr < 0xE4000000 &&
		((mmucr&MMUCR_SQMD) == 0) ) {
		/* Conditional user-mode access to the store-queue (no translation) */
		return (uint64_t)addr;
	    }
	    /* User-mode access to a privileged region: address error (macro returns) */
	    MMU_READ_ADDR_ERROR();
	}
    }
    
    if( (mmucr & MMUCR_AT) == 0 ) {
	/* Address translation disabled - pass through */
	return (uint64_t)addr;
    }

    /* If we get this far, translation is required */

    /* ASID matching is skipped in privileged mode when MMUCR.SV is set */
    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
    
    int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );

    switch(entryNo) {
    case -1:
	MMU_TLB_READ_MISS_ERROR(addr);
	break;
    case -2:
	MMU_TLB_MULTI_HIT_ERROR(addr);
	break;
    default:
	/* User-mode read requires the user-accessible bit */
	if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
	    !IS_SH4_PRIVMODE() ) {
	    /* protection violation */
	    MMU_TLB_READ_PROT_ERROR(addr);
	}

	/* finally generate the target address */
	return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) | 
	    (addr & (~mmu_utlb[entryNo].mask));
    }
    /* Not reached: both error cases return via their macros */
    return -1;
}
nkeynes@550
   593
nkeynes@550
   594
static void mmu_invalidate_tlb()
nkeynes@550
   595
{
nkeynes@550
   596
    int i;
nkeynes@550
   597
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
nkeynes@550
   598
	mmu_itlb[i].flags &= (~TLB_VALID);
nkeynes@550
   599
    }
nkeynes@550
   600
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
nkeynes@550
   601
	mmu_utlb[i].flags &= (~TLB_VALID);
nkeynes@550
   602
    }
nkeynes@550
   603
}
nkeynes@550
   604
nkeynes@550
   605
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
nkeynes@550
   606
nkeynes@550
   607
/**
 * ITLB address-array read: VPN | ASID | valid bit for the addressed slot.
 */
int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *entry = &mmu_itlb[ITLB_ENTRY(addr)];
    return entry->vpn | entry->asid | (entry->flags & TLB_VALID);
}
nkeynes@550
   612
/**
 * ITLB data-array read: PPN combined with the entry flags.
 */
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *entry = &mmu_itlb[ITLB_ENTRY(addr)];
    return entry->ppn | entry->flags;
}
nkeynes@550
   617
nkeynes@550
   618
/**
 * ITLB address-array write: sets VPN and ASID; of the flags, only the
 * valid bit is writable through this path.
 */
void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *entry = &mmu_itlb[ITLB_ENTRY(addr)];
    entry->vpn  = val & 0xFFFFFC00;
    entry->asid = val & 0x000000FF;
    entry->flags = (entry->flags & ~TLB_VALID) | (val & TLB_VALID);
}
nkeynes@550
   625
nkeynes@550
   626
/**
 * ITLB data-array write: sets PPN and the ITLB-relevant flag bits, and
 * refreshes the cached page-size mask.
 */
void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *entry = &mmu_itlb[ITLB_ENTRY(addr)];
    entry->ppn   = val & 0x1FFFFC00;
    entry->flags = val & 0x00001DA;
    entry->mask  = get_mask_for_flags(val);
}
nkeynes@550
   633
nkeynes@550
   634
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
nkeynes@550
   635
#define UTLB_ASSOC(addr) (addr&0x80)
nkeynes@550
   636
#define UTLB_DATA2(addr) (addr&0x00800000)
nkeynes@550
   637
nkeynes@550
   638
/**
 * UTLB address-array read: VPN | ASID | valid bit, with the dirty bit
 * shifted into its architectural position.
 */
int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *entry = &mmu_utlb[UTLB_ENTRY(addr)];
    return entry->vpn | entry->asid | (entry->flags & TLB_VALID) |
        ((entry->flags & TLB_DIRTY) << 7);
}
nkeynes@550
   644
/**
 * UTLB data-array read. DATA2 addresses select the PCMCIA extension
 * word; otherwise PPN | flags is returned.
 */
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *entry = &mmu_utlb[UTLB_ENTRY(addr)];
    return UTLB_DATA2(addr) ? entry->pcmcia : (entry->ppn | entry->flags);
}
nkeynes@550
   653
nkeynes@550
   654
/**
 * UTLB address-array write. Associative writes search the UTLB and
 * update the V/D bits of the matching entry (multi-hit raises a reset
 * exception); direct writes replace the addressed entry's VPN/ASID/V/D.
 */
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
	uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
	int entryNo = mmu_utlb_lookup_assoc( val, asid );
	if( entryNo >= 0 ) {
	    struct utlb_entry *ent = &mmu_utlb[entryNo];
	    ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
	    ent->flags |= (val & TLB_VALID);
	    ent->flags |= ((val & 0x200)>>7);
	} else if( entryNo == -2 ) {
	    /* BUG FIX: MMU_TLB_MULTI_HIT_ERROR expands to 'return <value>',
	     * which is invalid in this void function; raise the multi-hit
	     * reset explicitly instead. */
	    sh4_raise_reset(EXC_TLB_MULTI_HIT);
	    MMIO_WRITE(MMU, TEA, addr);
	    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
	}
    } else {
	struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
	ent->vpn = (val & 0xFFFFFC00);
	ent->asid = (val & 0xFF);
	ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
	ent->flags |= (val & TLB_VALID);
	ent->flags |= ((val & 0x200)>>7);
    }
}
nkeynes@550
   676
nkeynes@550
   677
/**
 * UTLB data-array write. DATA2 addresses update the PCMCIA extension
 * word; otherwise PPN and flags are set and the cached page-size mask
 * is refreshed.
 */
void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *entry = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        entry->pcmcia = val & 0x0000000F;
        return;
    }
    entry->ppn = (val & 0x1FFFFC00);
    entry->flags = (val & 0x000001FF);
    entry->mask = get_mask_for_flags(val);
}
nkeynes@550
   688
nkeynes@550
   689
/* Cache access - not implemented */
nkeynes@550
   690
nkeynes@550
   691
/* Instruction-cache address-array read - cache is not modelled, always 0 */
int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
nkeynes@550
   695
/* Instruction-cache data-array read - cache is not modelled, always 0 */
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
nkeynes@550
   699
/* Operand-cache address-array read - cache is not modelled, always 0 */
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
nkeynes@550
   703
/* Operand-cache data-array read - cache is not modelled, always 0 */
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
nkeynes@550
   707
nkeynes@550
   708
/* Instruction-cache address-array write - cache not modelled, value discarded */
void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}
nkeynes@550
   711
nkeynes@550
   712
/* Instruction-cache data-array write - cache not modelled, value discarded */
void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}
nkeynes@550
   715
nkeynes@550
   716
/* Operand-cache address-array write - cache not modelled, value discarded */
void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}
nkeynes@550
   719
nkeynes@550
   720
/* Operand-cache data-array write - cache not modelled, value discarded */
void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}
.