filename | src/sh4/mmu.c |
changeset | 946:d41ee7994db7 |
prev | 943:9a277733eafa |
next | 948:545c85cc56f1 |
author | nkeynes |
date | Tue Jan 06 01:58:08 2009 +0000 |
branch | lxdream-mem |
permissions | -rw-r--r-- |
last change | Fully integrate SQ with the new address space code - added additional 'prefetch' memory accessor. TLB is utterly untested, but non-TLB at least still works. |
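The commit message highlights the key change: every mem_region_fn accessor table gains a ninth 'prefetch' slot. The struct itself is declared in the memory headers rather than in this file, so as a reading aid here is a sketch of the assumed layout. The write_* field names appear verbatim in the diff below (page->write_long etc.); the read_* names and the exact typedef signatures are assumptions inferred from the initializers and casts in the patch.

/* Reading aid - assumed declarations, not part of this diff. */
typedef int32_t (FASTCALL *mem_read_fn_t)( sh4addr_t addr );
typedef void (FASTCALL *mem_write_fn_t)( sh4addr_t addr, uint32_t val );
typedef void (FASTCALL *mem_read_burst_fn_t)( unsigned char *dest, sh4addr_t addr );
typedef void (FASTCALL *mem_write_burst_fn_t)( sh4addr_t addr, unsigned char *src );
typedef void (FASTCALL *mem_prefetch_fn_t)( sh4addr_t addr );  /* the new accessor */

struct mem_region_fn {
    mem_read_fn_t read_long;        mem_write_fn_t write_long;
    mem_read_fn_t read_word;        mem_write_fn_t write_word;
    mem_read_fn_t read_byte;        mem_write_fn_t write_byte;
    mem_read_burst_fn_t read_burst; mem_write_burst_fn_t write_burst;
    mem_prefetch_fn_t prefetch;     /* ninth slot, added by this changeset */
};
typedef struct mem_region_fn *mem_region_fn_t;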
--- a/src/sh4/mmu.c	Mon Jan 05 04:16:28 2009 +0000
+++ b/src/sh4/mmu.c	Tue Jan 06 01:58:08 2009 +0000
@@ -47,10 +47,6 @@
 mem_region_fn_t *sh4_address_space;
 mem_region_fn_t *sh4_user_address_space;
 
-/* MMU-mapped storequeue targets. Only used with TLB on */
-mem_region_fn_t *storequeue_address_space;
-mem_region_fn_t *storequeue_user_address_space;
-
 /* Accessed from the UTLB accessor methods */
 uint32_t mmu_urc;
 uint32_t mmu_urb;
@@ -61,6 +57,7 @@
 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
 static uint32_t mmu_lrui;
 static uint32_t mmu_asid; // current asid
+static struct utlb_default_regions *mmu_user_storequeue_regions;
 
 /* Structures for 1K page handling */
 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
@@ -77,7 +74,7 @@
 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
 static void mmu_set_tlb_enabled( int tlb_on );
 static void mmu_set_tlb_asid( uint32_t asid );
-static void mmu_set_storequeue_protected( int protected );
+static void mmu_set_storequeue_protected( int protected, int tlb_on );
 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
@@ -86,12 +83,23 @@
 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
 
+static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
 static uint32_t get_tlb_size_mask( uint32_t flags );
 static uint32_t get_tlb_size_pages( uint32_t flags );
 
+#define DEFAULT_REGIONS 0
+#define DEFAULT_STOREQUEUE_REGIONS 1
+#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
+
+static struct utlb_default_regions mmu_default_regions[3] = {
+        { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
+        { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
+        { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
+
+#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
 
 /*********************** Module public functions ****************************/
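For reference, each row of mmu_default_regions supplies the fallback regions used while (un)mapping pages: the region for an unmapped page, for a page that is mapped but not user-accessible, and for a multiple-match. The utlb_default_regions type is not declared in this diff; the sketch below infers it from the initializer above and from the ->tlb_miss / ->tlb_prot / ->tlb_multihit accesses later in the patch.

/* Assumed declaration (presumably in a header) - field order matches the
 * initializer rows: { miss, protected, multihit }. */
struct utlb_default_regions {
    mem_region_fn_t tlb_miss;     /* no matching UTLB entry */
    mem_region_fn_t tlb_prot;     /* entry matched but not user-accessible */
    mem_region_fn_t tlb_multihit; /* more than one UTLB entry matched */
};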
@@ -104,12 +112,11 @@
 {
     sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
     sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
-    storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
-    storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
+    mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
 
     mmu_set_tlb_enabled(0);
     mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
-    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue);
+    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
 
     /* Setup P4 tlb/cache access regions */
     mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
@@ -186,7 +193,7 @@
 
     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
     mmu_set_tlb_enabled(mmucr&MMUCR_AT);
-    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
+    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
     return 0;
 }
@@ -262,7 +269,7 @@
         val &= 0x00000301;
         tmp = MMIO_READ( MMU, MMUCR );
         if( (val ^ tmp) & (MMUCR_SQMD) ) {
-            mmu_set_storequeue_protected( val & MMUCR_SQMD );
+            mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
         }
         if( (val ^ tmp) & (MMUCR_AT) ) {
             // AT flag has changed state - flush the xlt cache as all bets
@@ -387,15 +394,16 @@
     mem_region_fn_t *ptr, *uptr;
     int i;
 
+    /* Reset the storequeue area */
+
     if( tlb_on ) {
         mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
         mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
         mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
-        for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space;
-             i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
-            *ptr++ = &mem_region_tlb_miss;
-            *uptr++ = &mem_region_tlb_miss;
-        }
+
+        /* Default SQ prefetch goes to TLB miss (?) */
+        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
+        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
         mmu_utlb_register_all();
     } else {
         for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
@@ -404,16 +412,45 @@
         for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
             memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
         }
+
+        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
+        if( IS_STOREQUEUE_PROTECTED() ) {
+            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
+        } else {
+            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
+        }
     }
+
 }
 
-static void mmu_set_storequeue_protected( int protected )
+/**
+ * Flip the SQMD switch - this is rather expensive, so will need to be changed if
+ * anything expects to do this frequently.
+ */
+static void mmu_set_storequeue_protected( int protected, int tlb_on )
 {
+    mem_region_fn_t nontlb_region;
+    int i;
+
     if( protected ) {
-        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
+        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
+        nontlb_region = &p4_region_storequeue_sqmd;
     } else {
-        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
+        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
+        nontlb_region = &p4_region_storequeue;
     }
+
+    if( tlb_on ) {
+        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
+        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
+            if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
+                mmu_utlb_insert_entry(i);
+            }
+        }
+    } else {
+        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
+    }
+
 }
 
 static void mmu_set_tlb_asid( uint32_t asid )
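Taken together, mmu_set_tlb_enabled() and the reworked mmu_set_storequeue_protected() resolve a user-mode access to the 0xE0000000-0xE3FFFFFF store-queue area as follows (a summary of the code above, not part of the change itself):

    MMUCR.AT  MMUCR.SQMD  user region installed
    --------  ----------  ----------------------------------------------------
       0          0       p4_region_storequeue
       0          1       p4_region_storequeue_sqmd (address error)
       1          0       p4_region_storequeue_miss, refined per UTLB entry
       1          1       p4_region_storequeue_sqmd_miss, refined per UTLB entry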
@@ -488,13 +525,16 @@
 {
     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
+    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
+    struct utlb_default_regions *userdefs = privdefs;
+
     gboolean mapping_ok = TRUE;
     int i;
 
     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
         /* Storequeue mapping */
-        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
-        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
+        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
+        userdefs = mmu_user_storequeue_regions;
     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
         user_page = NULL; /* No user access to P3 region */
     } else if( start_addr >= 0x80000000 ) {
@@ -518,58 +558,47 @@
         }
 
         if( priv_page != NULL ) {
-            if( ent->subpages[idx] == &mem_region_tlb_miss ) {
+            if( ent->subpages[idx] == privdefs->tlb_miss ) {
                 ent->subpages[idx] = priv_page;
             } else {
                 mapping_ok = FALSE;
-                ent->subpages[idx] = &mem_region_tlb_multihit;
+                ent->subpages[idx] = privdefs->tlb_multihit;
             }
         }
         if( user_page != NULL ) {
-            if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
+            if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
                 ent->user_subpages[idx] = user_page;
             } else {
                 mapping_ok = FALSE;
-                ent->user_subpages[idx] = &mem_region_tlb_multihit;
+                ent->user_subpages[idx] = userdefs->tlb_multihit;
             }
         }
 
     } else {
         if( priv_page != NULL ) {
-            if( user_page != NULL ) {
-                for( i=0; i<npages; i++ ) {
-                    if( *ptr == &mem_region_tlb_miss ) {
-                        *ptr++ = priv_page;
-                        *uptr++ = user_page;
-                    } else {
-                        mapping_ok = FALSE;
-                        *ptr++ = &mem_region_tlb_multihit;
-                        *uptr++ = &mem_region_tlb_multihit;
-                    }
-                }
-            } else {
-                /* Privileged mapping only */
-                for( i=0; i<npages; i++ ) {
-                    if( *ptr == &mem_region_tlb_miss ) {
-                        *ptr++ = priv_page;
-                    } else {
-                        mapping_ok = FALSE;
-                        *ptr++ = &mem_region_tlb_multihit;
-                    }
+            /* Privileged mapping only */
+            for( i=0; i<npages; i++ ) {
+                if( *ptr == privdefs->tlb_miss ) {
+                    *ptr++ = priv_page;
+                } else {
+                    mapping_ok = FALSE;
+                    *ptr++ = privdefs->tlb_multihit;
                 }
             }
-        } else if( user_page != NULL ) {
+        }
+        if( user_page != NULL ) {
             /* User mapping only (eg ASID change remap w/ SV=1) */
             for( i=0; i<npages; i++ ) {
-                if( *uptr == &mem_region_tlb_miss ) {
+                if( *uptr == userdefs->tlb_miss ) {
                     *uptr++ = user_page;
                 } else {
                     mapping_ok = FALSE;
-                    *uptr++ = &mem_region_tlb_multihit;
+                    *uptr++ = userdefs->tlb_multihit;
                 }
             }
         }
     }
+
     return mapping_ok;
 }
@@ -621,13 +650,16 @@
 {
     mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
     mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
+    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
+    struct utlb_default_regions *userdefs = privdefs;
+
     gboolean unmapping_ok = TRUE;
     int i;
 
     if( (start_addr & 0xFC000000) == 0xE0000000 ) {
         /* Storequeue mapping */
-        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
-        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
+        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
+        userdefs = mmu_user_storequeue_regions;
     } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
         unmap_user = FALSE;
     } else if( start_addr >= 0x80000000 ) {
@@ -638,13 +670,13 @@
         assert( IS_1K_PAGE_ENTRY( *ptr ) );
         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
         int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
-        if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
+        if( ent->subpages[idx] == privdefs->tlb_multihit ) {
             unmapping_ok = FALSE;
         }
         if( unmap_priv )
-            ent->subpages[idx] = &mem_region_tlb_miss;
+            ent->subpages[idx] = privdefs->tlb_miss;
         if( unmap_user )
-            ent->user_subpages[idx] = &mem_region_tlb_miss;
+            ent->user_subpages[idx] = userdefs->tlb_miss;
 
         /* If all 4 subpages have the same content, merge them together and
          * release the 1K entry
@@ -664,30 +696,21 @@
         }
     } else {
         if( unmap_priv ) {
-            if( unmap_user ) {
-                for( i=0; i<npages; i++ ) {
-                    if( *ptr == &mem_region_tlb_multihit ) {
-                        unmapping_ok = FALSE;
-                    }
-                    *ptr++ = &mem_region_tlb_miss;
-                    *uptr++ = &mem_region_tlb_miss;
-                }
-            } else {
-                /* Privileged (un)mapping only */
-                for( i=0; i<npages; i++ ) {
-                    if( *ptr == &mem_region_tlb_multihit ) {
-                        unmapping_ok = FALSE;
-                    }
-                    *ptr++ = &mem_region_tlb_miss;
-                }
-            }
-        } else if( unmap_user ) {
-            /* User (un)mapping only */
+            /* Privileged (un)mapping */
             for( i=0; i<npages; i++ ) {
-                if( *uptr == &mem_region_tlb_multihit ) {
+                if( *ptr == privdefs->tlb_multihit ) {
                     unmapping_ok = FALSE;
                 }
-                *uptr++ = &mem_region_tlb_miss;
+                *ptr++ = privdefs->tlb_miss;
+            }
+        }
+        if( unmap_user ) {
+            /* User (un)mapping */
+            for( i=0; i<npages; i++ ) {
+                if( *uptr == userdefs->tlb_multihit ) {
+                    unmapping_ok = FALSE;
+                }
+                *uptr++ = userdefs->tlb_miss;
             }
         }
     }
@@ -703,28 +726,47 @@
     sh4addr_t start_addr = ent->vpn & ent->mask;
     int npages = get_tlb_size_pages(ent->flags);
 
-    if( (ent->flags & TLB_USERMODE) == 0 ) {
-        upage = &mem_region_user_protected;
-    } else {
-        upage = page;
+    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
+        /* Store queue mappings are a bit different - normal access is fixed to
+         * the store queue register block, and we only map prefetches through
+         * the TLB
+         */
+        mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
+
+        if( (ent->flags & TLB_USERMODE) == 0 ) {
+            upage = mmu_user_storequeue_regions->tlb_prot;
+        } else if( IS_STOREQUEUE_PROTECTED() ) {
+            upage = &p4_region_storequeue_sqmd;
+        } else {
+            upage = page;
+        }
+
+    } else {
+
+        if( (ent->flags & TLB_USERMODE) == 0 ) {
+            upage = &mem_region_tlb_protected;
+        } else {
+            upage = page;
+        }
+
+        if( (ent->flags & TLB_WRITABLE) == 0 ) {
+            page->write_long = (mem_write_fn_t)tlb_protected_write;
+            page->write_word = (mem_write_fn_t)tlb_protected_write;
+            page->write_byte = (mem_write_fn_t)tlb_protected_write;
+            page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
+            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
+        } else if( (ent->flags & TLB_DIRTY) == 0 ) {
+            page->write_long = (mem_write_fn_t)tlb_initial_write;
+            page->write_word = (mem_write_fn_t)tlb_initial_write;
+            page->write_byte = (mem_write_fn_t)tlb_initial_write;
+            page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
+            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
+        } else {
+            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
+        }
     }
+
     mmu_utlb_pages[entry].user_fn = upage;
-
-    if( (ent->flags & TLB_WRITABLE) == 0 ) {
-        page->write_long = (mem_write_fn_t)tlb_protected_write;
-        page->write_word = (mem_write_fn_t)tlb_protected_write;
-        page->write_byte = (mem_write_fn_t)tlb_protected_write;
-        page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
-        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
-    } else if( (ent->flags & TLB_DIRTY) == 0 ) {
-        page->write_long = (mem_write_fn_t)tlb_initial_write;
-        page->write_word = (mem_write_fn_t)tlb_initial_write;
-        page->write_byte = (mem_write_fn_t)tlb_initial_write;
-        page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
-        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
-    } else {
-        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
-    }
 
     /* Is page visible? */
     if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
@@ -1124,24 +1166,6 @@
     }
 }
 
-void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
-{
-    int queue = (addr&0x20)>>2;
-    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
-    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
-    sh4addr_t target = (addr&0x03FFFFE0) | hi;
-    ext_address_space[target>>12]->write_burst( target, src );
-}
-
-void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
-{
-    int queue = (addr&0x20)>>2;
-    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
-    sh4addr_t target;
-    /* Store queue operation */
-    storequeue_address_space[(addr&0x03FFFFFE0)>>12]->write_burst( addr, src);
-}
-
 /********************** TLB Direct-Access Regions ***************************/
 #ifdef HAVE_FRAME_ADDRESS
 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
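Both explicit flush entry points are deleted: per the commit message, store-queue flushes are now driven through the new prefetch accessor on the address-space tables. ccn_storequeue_prefetch is referenced by the region vtables below but defined elsewhere; the following sketch is an assumption that simply transplants the target computation from the deleted sh4_flush_store_queue() into the prefetch hook.

/* Sketch only - assumed to live with the other ccn_* accessors. The body
 * reuses the deleted sh4_flush_store_queue() logic: QACR0/QACR1 supply the
 * upper target bits, then the 32-byte queue is burst-written out through
 * the external address space. */
void FASTCALL ccn_storequeue_prefetch( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;                          /* selects SQ0 or SQ1 */
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;           /* QACR supplies the high bits */
    ext_address_space[target>>12]->write_burst( target, src );
}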
@@ -1308,22 +1332,26 @@
         mmu_itlb_addr_read, mmu_itlb_addr_write,
         mmu_itlb_addr_read, mmu_itlb_addr_write,
         mmu_itlb_addr_read, mmu_itlb_addr_write,
-        unmapped_read_burst, unmapped_write_burst };
+        unmapped_read_burst, unmapped_write_burst,
+        unmapped_prefetch };
 struct mem_region_fn p4_region_itlb_data = {
         mmu_itlb_data_read, mmu_itlb_data_write,
         mmu_itlb_data_read, mmu_itlb_data_write,
         mmu_itlb_data_read, mmu_itlb_data_write,
-        unmapped_read_burst, unmapped_write_burst };
+        unmapped_read_burst, unmapped_write_burst,
+        unmapped_prefetch };
 struct mem_region_fn p4_region_utlb_addr = {
         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
         mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
-        unmapped_read_burst, unmapped_write_burst };
+        unmapped_read_burst, unmapped_write_burst,
+        unmapped_prefetch };
 struct mem_region_fn p4_region_utlb_data = {
         mmu_utlb_data_read, mmu_utlb_data_write,
         mmu_utlb_data_read, mmu_utlb_data_write,
         mmu_utlb_data_read, mmu_utlb_data_write,
-        unmapped_read_burst, unmapped_write_burst };
+        unmapped_read_burst, unmapped_write_burst,
+        unmapped_prefetch };
 
 /********************** Error regions **************************/
@@ -1417,25 +1445,92 @@
         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
         (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
-        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
+        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
+        unmapped_prefetch };
 
 struct mem_region_fn mem_region_tlb_miss = {
         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
         (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
-        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
+        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
+        unmapped_prefetch };
 
-struct mem_region_fn mem_region_user_protected = {
+struct mem_region_fn mem_region_tlb_protected = {
         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
         (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
-        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
+        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
+        unmapped_prefetch };
 
 struct mem_region_fn mem_region_tlb_multihit = {
         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
         (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
-        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };
+        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
+        (mem_prefetch_fn_t)tlb_multi_hit_read };
+
+/* Store-queue regions */
+/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
+ * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
+ * some cases), in contrast to the ordinary fields above.
+ *
+ * There is probably a simpler way to do this.
+ */
+
+struct mem_region_fn p4_region_storequeue = {
+        ccn_storequeue_read_long, ccn_storequeue_write_long,
+        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
+        unmapped_read_long, unmapped_write_long,
+        unmapped_read_burst, unmapped_write_burst,
+        ccn_storequeue_prefetch };
+
+struct mem_region_fn p4_region_storequeue_miss = {
+        ccn_storequeue_read_long, ccn_storequeue_write_long,
+        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
+        unmapped_read_long, unmapped_write_long,
+        unmapped_read_burst, unmapped_write_burst,
+        (mem_prefetch_fn_t)tlb_miss_read };
+
+struct mem_region_fn p4_region_storequeue_multihit = {
+        ccn_storequeue_read_long, ccn_storequeue_write_long,
+        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
+        unmapped_read_long, unmapped_write_long,
+        unmapped_read_burst, unmapped_write_burst,
+        (mem_prefetch_fn_t)tlb_multi_hit_read };
+
+struct mem_region_fn p4_region_storequeue_protected = {
+        ccn_storequeue_read_long, ccn_storequeue_write_long,
+        unmapped_read_long, unmapped_write_long,
+        unmapped_read_long, unmapped_write_long,
+        unmapped_read_burst, unmapped_write_burst,
+        (mem_prefetch_fn_t)tlb_protected_read };
+
+struct mem_region_fn p4_region_storequeue_sqmd = {
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
+        (mem_prefetch_fn_t)address_error_read };
 
-
\ No newline at end of file
+struct mem_region_fn p4_region_storequeue_sqmd_miss = {
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
+        (mem_prefetch_fn_t)tlb_miss_read };
+
+struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
+        (mem_prefetch_fn_t)tlb_multi_hit_read };
+
+struct mem_region_fn p4_region_storequeue_sqmd_protected = {
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
+        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
+        (mem_prefetch_fn_t)tlb_protected_read };
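The eight p4_region_storequeue* vtables above form a 4x2 matrix: the first eight slots depend only on SQMD (long access to the SQ register block versus address error), while the prefetch slot tracks the TLB state.

    TLB state     SQMD=0                           SQMD=1
    -----------   ------------------------------   -----------------------------------
    direct/hit    p4_region_storequeue             p4_region_storequeue_sqmd
    miss          p4_region_storequeue_miss        p4_region_storequeue_sqmd_miss
    multi-hit     p4_region_storequeue_multihit    p4_region_storequeue_sqmd_multihit
    protected     p4_region_storequeue_protected   p4_region_storequeue_sqmd_protected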