lxdream.org :: lxdream/src/sh4/mmux86.c
filename     src/sh4/mmux86.c
changeset    946:d41ee7994db7
prev         942:05e5d6a62e67
next         1067:d3c00ffccfcd
author       nkeynes
date         Tue Jan 06 01:58:08 2009 +0000
branch       lxdream-mem
permissions  -rw-r--r--
last change  Fully integrate SQ with the new address space code - added an
             additional 'prefetch' memory accessor. The TLB path is utterly
             untested, but the non-TLB path at least still works.
/**
 * $Id$
 *
 * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
 *
 * Copyright (c) 2008 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "lxdream.h"
#include "mem.h"
#include "sh4/sh4core.h"
#include "sh4/sh4mmio.h"
#include "sh4/sh4trans.h"
#include "sh4/mmu.h"
#include "sh4/x86op.h"

#if SIZEOF_VOID_P == 8
#define ARG1 R_EDI
#define ARG2 R_ESI
#define DECODE() \
    MOV_imm64_r32((uintptr_t)ext_address_space, R_EAX);     /* movq ptr, %rax */ \
    REXW(); OP(0x8B); OP(0x0C); OP(0xC8)                    /* movq [%rax + %rcx*8], %rcx */
#else
#define ARG1 R_EAX
#define ARG2 R_EDX
#define DECODE() \
    MOV_r32disp32x4_r32( R_ECX, (uintptr_t)ext_address_space, R_ECX );
#endif
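
/* ARG1/ARG2 name the registers that carry the first two arguments into the
 * generated stubs: %rdi/%rsi under the x86-64 SysV ABI, and %eax/%edx on
 * 32-bit (apparently a register-based calling convention used by the
 * translator). DECODE replaces the page number in %ecx with that page's
 * function-table pointer, loaded from ext_address_space.
 */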

void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;
    int inc = writable ? 1 : 2;
    int i;

    xlat_output = page->code;
    uint8_t **fn = (uint8_t **)ext_address_space[ppn>>12];
    uint8_t **out = (uint8_t **)&page->fn;
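
    /* Emit one stub per accessor slot in the page's function table: each
     * stub increments mmu_urc, relocates the address into the external
     * region, and dispatches to the underlying handler. For a read-only
     * page, inc==2 fills only every other slot, presumably leaving the
     * write accessors on their previous (fault-raising) handlers. The
     * trailing // n annotations appear to count the emitted bytes. */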
    for( i=0; i<9; i+= inc, fn += inc, out += inc ) {
        *out = xlat_output;
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
        OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
#else
        OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
#endif
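        /* Relocate: for addresses within this page, vaddr + (ppn - vpn)
         * yields the external (physical) address. */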
        ADD_imm32_r32( ppn-vpn, ARG1 ); // 6
        if( ent->mask >= 0xFFFFF000 ) {
            // Maps to a single page, so jump directly there
            int rel = (*fn - xlat_output);
            JMP_rel( rel ); // 5
        } else {
            MOV_r32_r32( ARG1, R_ECX ); // 2
            SHR_imm8_r32( 12, R_ECX );  // 3
            DECODE();                   // 14
            JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) );    // 3
        }
    }

    page->fn.prefetch = unmapped_prefetch; // FIXME
}

void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;

    xlat_output = page->code;

    memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );
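
    /* Start from the default P4 store-queue handlers, then replace only
     * the prefetch accessor with a stub that relocates the address and
     * jumps to the TLB-aware store-queue prefetch. */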
    /* TESTME: Does a PREF increment the URC counter? */
    page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
    ADD_imm32_r32( ppn-vpn, ARG1 );
    int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
    JMP_rel( rel );
}

void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    xlat_output = entry->code;
    int i;
    uint8_t **out = (uint8_t **)&entry->fn;
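
    /* Each stub extracts bits 10-11 of the address to select one of the
     * four 1K subpage function tables, then jumps through the slot that
     * matches its own offset within the table. */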
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );
        AND_imm8s_r32( 0x3, R_ECX );
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8);                   /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
#endif
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) );    // 3
    }
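
    /* Repeat for the user-mode table, dispatching through user_subpages
     * instead. */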
    out = (uint8_t **)&entry->user_fn;
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );
        AND_imm8s_r32( 0x3, R_ECX );
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8);                   /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
#endif
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) );    // 3
    }

}