/**
 * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
 *
 * Copyright (c) 2008 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "lxdream.h"
#include "mem.h"
#include "sh4/sh4core.h"
#include "sh4/sh4mmio.h"
#include "sh4/sh4trans.h"
#include "sh4/mmu.h"
#include "sh4/x86op.h"

#if SIZEOF_VOID_P == 8
#define XLAT(addr_space) \
    MOV_imm64_r32((uintptr_t)addr_space, R_EAX); /* movq ptr, %rax */ \
    REXW(); OP(0x8B); OP(0x0C); OP(0xC8) /* movq [%rax + %rcx*8], %rcx */
#else
#define XLAT(addr_space) \
    MOV_r32disp32x4_r32( R_ECX, (uintptr_t)addr_space, R_ECX )
#endif
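
/* Each vtable slot is filled with a short generated stub. In the common
 * (multi-page) case the emitted code looks roughly like this sketch
 * (64-bit form; the trailing "// n" comments in the emitters below appear
 * to record the byte length of each emitted instruction):
 *
 *     movq  &mmu_urc, %rax
 *     addl  $1, (%rax)            ; bump the URC replacement counter
 *     addl  $(ppn-vpn), %arg1     ; rewrite the virtual address to physical
 *     movl  %arg1, %ecx
 *     shrl  $12, %ecx             ; extract the page number
 *     movq  addr_space, %rax      ; \ XLAT: fetch the target page's
 *     movq  (%rax,%rcx,8), %rcx   ; /  mem_region_fn table
 *     jmp   *disp8(%rcx)          ; tail-call the slot matching this one
 *
 * Entries that map a single 4K page skip the table lookup and jump to the
 * target function directly.
 */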
void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;
    struct mem_region_fn **addr_space;
    uint8_t **out = (uint8_t **)&page->fn;
    uint8_t **fn;
    int inc = writable ? 1 : 2; /* read-only pages: touch only the read/prefetch slots */
    int i;

    xlat_output = page->code;   /* the emitter macros write through xlat_output */
    if( (ppn & 0x1FFFFFFF) >= 0x1C000000 ) {
        /* SH4 control region - use the P4 address of the page */
        ppn |= 0xE0000000;
        addr_space = sh4_address_space;
    } else {
        addr_space = ext_address_space;
    }
    fn = (uint8_t **)addr_space[ppn>>12];

    for( i=0; i<9; i+= inc, fn += inc, out += inc ) {
        *out = xlat_output;
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
        OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
#else
        OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
#endif
        ADD_imm32_r32( ppn-vpn, ARG1 ); // 6
        if( ent->mask >= 0xFFFFF000 ) {
            // Maps to a single page, so jump directly there
            int rel = (*fn - xlat_output);
            JMP_rel( rel ); // 5
        } else {
            MOV_r32_r32( ARG1, R_ECX ); // 2
            SHR_imm8_r32( 12, R_ECX ); // 3
            XLAT(addr_space); // 14
            JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
        }
    }

    page->fn.prefetch = unmapped_prefetch; // FIXME
}
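
/* Build the vtable for a TLB entry that maps the store-queue area
 * (0xE0000000-0xE3FFFFFF). Everything except prefetch behaves exactly like
 * the untranslated P4 store queue, so the table is copied wholesale and a
 * single stub is emitted that applies the vpn->ppn adjustment and then
 * tail-calls ccn_storequeue_prefetch_tlb. */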
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;

    xlat_output = page->code;

    memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );

    /* TESTME: Does a PREF increment the URC counter? */
    page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
    ADD_imm32_r32( ppn-vpn, ARG1 );
    int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
    JMP_rel( rel );
}
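
/* 1K pages are finer-grained than the 4K address-space tables, so each stub
 * emitted here dispatches a second time on address bits 10-11 to select one
 * of the entry's four subpage function tables (with separate tables for
 * privileged and user mode). */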
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    xlat_output = entry->code;

    int i;
    uint8_t **out = (uint8_t **)&entry->fn;

    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );
        AND_imm8s_r32( 0x3, R_ECX );
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
#endif
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
    }

    out = (uint8_t **)&entry->user_fn;
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );
        AND_imm8s_r32( 0x3, R_ECX );
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
#endif
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
    }
}