filename | src/sh4/mmux86.c |
changeset | 975:007bf7eb944f |
prev | 972:fb948057cf08 |
next | 991:60c7fab9c880 |
author | nkeynes |
date | Wed Feb 25 09:00:05 2009 +0000 (15 years ago) |
permissions | -rw-r--r-- |
last change | Argh. Apparently we still do really need _BSD_SOURCE and _GNU_SOURCE I think that's everything now... |
view | annotate | diff | log | raw |
1 /**
2 * $Id$
3 *
4 * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
5 *
6 * Copyright (c) 2008 Nathan Keynes.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
19 #include "lxdream.h"
20 #include "mem.h"
21 #include "sh4/sh4core.h"
22 #include "sh4/sh4mmio.h"
23 #include "sh4/sh4trans.h"
24 #include "sh4/mmu.h"
25 #include "sh4/x86op.h"
#if SIZEOF_VOID_P == 8
/* x86-64: first two integer arguments arrive in %rdi / %rsi
 * (System V AMD64 calling convention). */
#define ARG1 R_EDI
#define ARG2 R_ESI
/* Emit code that turns the page number in %ecx into the matching
 * mem_region_fn pointer: load the table base into %rax, then do an
 * 8-byte scaled indexed load. NOTE(review): expands the identifier
 * 'addr_space' from the enclosing scope at the expansion site. */
#define DECODE() \
    MOV_imm64_r32((uintptr_t)addr_space, R_EAX); /* movq ptr, %rax */ \
    REXW(); OP(0x8B); OP(0x0C); OP(0xC8) /* movq [%rax + %rcx*8], %rcx */
#else
/* ia32: args in %eax / %edx — presumably a GCC regparm-style convention
 * used by the translator; verify against the call-stub generator. */
#define ARG1 R_EAX
#define ARG2 R_EDX
/* 32-bit variant: one indexed load from the 4-byte-pointer table. */
#define DECODE() \
    MOV_r32disp32x4_r32( R_ECX, (uintptr_t)addr_space, R_ECX );
#endif
/**
 * Generate the x86 stub functions for a UTLB page entry: for each slot of
 * page->fn, emit a tiny trampoline into page->code that bumps the URC
 * counter, rebases the virtual address to the physical address (ppn-vpn),
 * and jumps to the underlying memory-region handler.
 *
 * @param ent      the UTLB entry being mapped
 * @param page     output: receives the generated code and function pointers
 * @param writable if FALSE, only every second slot is generated (inc==2) —
 *                 presumably skipping the write handlers; confirm against
 *                 struct mem_region_fn's slot layout.
 */
void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;
    struct mem_region_fn **addr_space;
    uint8_t **out = (uint8_t **)&page->fn;  /* treat the fn struct as an array of code pointers */
    uint8_t **fn;
    int inc = writable ? 1 : 2;
    int i;

    xlat_output = page->code;               /* emitters append to this cursor */
    if( (ppn & 0x1FFFFFFF) >= 0x1C000000 ) {
        /* SH4 control region: force the P4 address range and dispatch
         * through the on-chip address space table instead of external memory */
        ppn |= 0xE0000000;
        addr_space = sh4_address_space;
    } else {
        addr_space = ext_address_space;
    }
    fn = (uint8_t **)addr_space[ppn>>12];   /* handler table for the target physical page */

    /* One stub per slot; numeric comments are the emitted byte counts. */
    for( i=0; i<10; i+= inc, fn += inc, out += inc ) {
        *out = xlat_output;                 /* record this stub's entry point */
        if( i != 9 ) { /* read_byte_for_write doesn't increment mmu_urc, everything else does */
#if SIZEOF_VOID_P == 8
            MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
            OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
#else
            OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
#endif
        }
        ADD_imm32_r32( ppn-vpn, ARG1 ); // 6  /* rebase vaddr -> paddr */
        if( ent->mask >= 0xFFFFF000 ) {
            // Maps to a single page, so jump directly there
            int rel = (*fn - xlat_output);
            JMP_rel( rel ); // 5
        } else {
            /* Entry spans multiple 4K pages: re-derive the page number from
             * the rebased address and dispatch through addr_space at runtime */
            MOV_r32_r32( ARG1, R_ECX ); // 2
            SHR_imm8_r32( 12, R_ECX ); // 3
            DECODE(); // 14
            /* Indirect jump to the slot at the same offset as this stub */
            JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
        }
    }

    page->fn.prefetch = unmapped_prefetch; // FIXME
}
/**
 * Generate the vtable for a UTLB entry that maps the store-queue region.
 * All slots are copied verbatim from p4_region_storequeue; only the
 * prefetch slot gets a generated stub, which increments mmu_urc, rebases
 * the address by (ppn-vpn), and tail-jumps to ccn_storequeue_prefetch_tlb.
 *
 * @param ent  the UTLB entry being mapped
 * @param page output: receives the copied vtable and the generated stub
 */
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;

    xlat_output = page->code;   /* emitters append to this cursor */

    /* Start from the default store-queue handlers... */
    memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );

    /* ...and replace prefetch with the freshly generated stub below. */
    page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
#if SIZEOF_VOID_P == 8
    MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
    OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
#else
    OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
#endif
    ADD_imm32_r32( ppn-vpn, ARG1 );     /* rebase vaddr -> paddr */
    int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
    JMP_rel( rel );                     /* tail-call the real prefetch handler */
}
/**
 * Generate dispatch stubs for a 1K-page UTLB entry. A 1K entry subdivides a
 * 4K page into four subpages, so each generated stub extracts bits 10-11 of
 * the address as the subpage index, loads the matching handler table from
 * entry->subpages[] (or entry->user_subpages[] for the user-mode vtable),
 * and jumps to the slot at the same offset as the stub itself.
 *
 * NOTE(review): only 9 slots are generated here vs. 10 in
 * mmu_utlb_init_vtable — presumably the prefetch slot is handled
 * elsewhere; confirm against struct mem_region_fn.
 *
 * @param entry the 1K UTLB entry; both fn and user_fn vtables are filled in
 */
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    xlat_output = entry->code;  /* emitters append to this cursor */
    int i;
    uint8_t **out = (uint8_t **)&entry->fn;

    /* System vtable: dispatch through entry->subpages[] */
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;     /* record this stub's entry point */
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );
        AND_imm8s_r32( 0x3, R_ECX );    /* subpage index = addr bits 10-11 */
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
#endif
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
    }

    /* User vtable: identical shape, but dispatches through user_subpages[] */
    out = (uint8_t **)&entry->user_fn;
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );
        AND_imm8s_r32( 0x3, R_ECX );    /* subpage index = addr bits 10-11 */
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
#endif
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
    }
}
.