4 * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
6 * Copyright (c) 2008 Nathan Keynes.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
21 #include "sh4/sh4core.h"
22 #include "sh4/sh4mmio.h"
23 #include "sh4/sh4trans.h"
25 #include "sh4/x86op.h"
/* NOTE(review): fragment of a macro that loads addr_space[%ecx] into %rcx/%ecx.
 * The #define line and the #else/#endif are elided from this view.  On 64-bit
 * hosts the table base must be materialized with a movq imm64 before the
 * indexed load (scale 8 for 8-byte pointers); the 32-bit variant below uses a
 * single disp32 indexed mov with scale 4.  Do not insert lines between the
 * continuation (trailing backslash) lines. */
27 #if SIZEOF_VOID_P == 8
31 MOV_imm64_r32((uintptr_t)addr_space, R_EAX); /* movq ptr, %rax */ \
32 REXW(); OP(0x8B); OP(0x0C); OP(0xC8) /* movq [%rax + %rcx*8], %rcx */
37 MOV_r32disp32x4_r32( R_ECX, (uintptr_t)addr_space, R_ECX );
/**
 * Populate the mem_region_fn vtable for one UTLB page entry by emitting
 * small x86 dispatch stubs into page->code.  Each stub increments the
 * mmu_urc counter, rebases the incoming virtual address to physical
 * (adds ppn-vpn), then jumps to the matching handler of the underlying
 * address-space region for the mapped physical page.
 * @param ent      UTLB entry describing the mapping (vpn/ppn/mask)
 * @param page     page entry whose fn slots and code buffer are filled
 * @param writable if FALSE the loop strides by 2, emitting only every
 *                 other slot — presumably skipping the write handlers;
 *                 TODO confirm slot layout against struct mem_region_fn
 * NOTE(review): this view elides several lines (braces, #else/#endif,
 * declarations of fn and i); comments describe only the visible code.
 */
40 void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
42 uint32_t mask = ent->mask;
43 uint32_t vpn = ent->vpn & mask;  /* virtual base of the mapping */
44 uint32_t ppn = ent->ppn & mask;  /* physical base of the mapping */
45 struct mem_region_fn **addr_space;
46 uint8_t **out = (uint8_t **)&page->fn;  /* fn slots receive the stub addresses */
48 int inc = writable ? 1 : 2;
51 xlat_output = page->code;  /* emit stubs into this page's own code buffer */
52 if( (ppn & 0x1FFFFFFF) >= 0x1C000000 ) {
53 /* SH4 control region */
55 addr_space = sh4_address_space;
57 addr_space = ext_address_space;  /* (elided else branch) ordinary external memory */
59 fn = (uint8_t **)addr_space[ppn>>12];  /* handler table for the target physical page */
61 for( i=0; i<10; i+= inc, fn += inc, out += inc ) {
63 if( i != 9 ) { /* read_byte_for_write doesn't increment mmu_urc, everything else does */
64 #if SIZEOF_VOID_P == 8
65 MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
66 OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX] -- bump mmu_urc via 64-bit abs address
68 OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc (32-bit abs)
71 ADD_imm32_r32( ppn-vpn, ARG1 ); // 6 bytes -- rebase vaddr to paddr
72 if( ent->mask >= 0xFFFFF000 ) {
73 // Maps to a single page, so jump directly there
74 int rel = (*fn - xlat_output);
77 MOV_r32_r32( ARG1, R_ECX ); // 2 bytes
78 SHR_imm8_r32( 12, R_ECX ); // 3 bytes -- physical page index (4K pages)
80 JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3 -- indirect via same slot offset
84 page->fn.prefetch = unmapped_prefetch; // FIXME
/**
 * Initialize the vtable for a UTLB entry that maps the SH4 store-queue
 * region.  Copies the canonical p4_region_storequeue handlers wholesale,
 * then replaces only the prefetch handler with a freshly emitted stub
 * that increments mmu_urc, rebases the address by (ppn-vpn), and tail
 * jumps (rel32, computed on the visible last line) into
 * ccn_storequeue_prefetch_tlb.
 * @param ent  UTLB entry describing the mapping
 * @param page page entry whose fn table / code buffer are initialized
 * NOTE(review): the emitting of the final jump and the closing brace are
 * elided from this view; comments cover only the visible lines.
 */
87 void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
89 uint32_t mask = ent->mask;
90 uint32_t vpn = ent->vpn & mask;  /* virtual base */
91 uint32_t ppn = ent->ppn & mask;  /* physical base */
93 xlat_output = page->code;  /* stub goes into the page's code buffer */
95 memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );
97 page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;  /* point prefetch at the stub we're about to emit */
98 #if SIZEOF_VOID_P == 8
99 MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
100 OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX] -- bump mmu_urc via 64-bit abs address
102 OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc (32-bit abs)
104 ADD_imm32_r32( ppn-vpn, ARG1 );  /* rebase vaddr to paddr */
105 int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;  /* rel32 for the tail jump */
/**
 * Build dispatch stubs for a 1K-page UTLB entry.  A 1K mapping covers a
 * quarter of a 4K dispatch page, so each of the 9 emitted stubs selects
 * a subpage at runtime: it extracts bits 11:10 of the address
 * (SHR 10 + AND 3 => subpage index 0..3), loads the corresponding
 * mem_region_fn pointer from entry->subpages, and jumps indirectly
 * through the slot at the same offset as the stub's own fn slot.
 * The second loop emits the identical pattern for the user-mode table
 * (entry->user_fn / entry->user_subpages).
 * @param entry the 1K UTLB entry whose fn/user_fn tables are generated
 * NOTE(review): braces, #else/#endif lines, the declaration of i, and
 * the function's closing brace are elided from this view.
 */
109 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
111 xlat_output = entry->code;  /* emit stubs into the entry's code buffer */
113 uint8_t **out = (uint8_t **)&entry->fn;  /* privileged-mode fn slots */
115 for( i=0; i<9; i++, out++ ) {
117 MOV_r32_r32( ARG1, R_ECX );
118 SHR_imm8_r32( 10, R_ECX );   /* move subpage bits down */
119 AND_imm8s_r32( 0x3, R_ECX ); /* subpage index 0..3 */
120 #if SIZEOF_VOID_P == 8
121 MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
122 REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
124 MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
126 JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3 -- jump via same slot offset
129 out = (uint8_t **)&entry->user_fn;  /* repeat for the user-mode table */
130 for( i=0; i<9; i++, out++ ) {
132 MOV_r32_r32( ARG1, R_ECX );
133 SHR_imm8_r32( 10, R_ECX );   /* move subpage bits down */
134 AND_imm8s_r32( 0x3, R_ECX ); /* subpage index 0..3 */
135 #if SIZEOF_VOID_P == 8
136 MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
137 REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
139 MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
141 JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3 -- jump via same slot offset
.