filename src/sh4/mmux86.c
changeset 991:60c7fab9c880
prev 975:007bf7eb944f
next 995:eb9d43e8aa08
author nkeynes
date Wed Mar 04 23:12:21 2009 +0000
permissions -rw-r--r--
last change Move xltcache to xlat/ src directory
Commit new and improved x86 opcode file - cleaned up and added support for amd64 extended registers
/**
 * $Id$
 *
 * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
 *
 * Copyright (c) 2008 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "lxdream.h"
#include "mem.h"
#include "sh4/sh4core.h"
#include "sh4/sh4mmio.h"
#include "sh4/sh4trans.h"
#include "sh4/mmu.h"
#include "xlat/x86/x86op.h"
#if SIZEOF_VOID_P == 8
#define ARG1 REG_RDI
#define ARG2 REG_RSI
#define XLAT(addr_space, reg) \
    MOVQ_imm64_r64( (uintptr_t)addr_space, REG_RAX ); \
    MOVP_sib_rptr( 3, reg, REG_RAX, 0, reg );
#define ADDP_imms_ptr(imm,p) \
    MOVQ_imm64_r64((uintptr_t)p, REG_EAX ); \
    ADDL_imms_r32disp(imm, REG_EAX, 0);
#else
#define ARG1 REG_EAX
#define ARG2 REG_EDX
#define XLAT(addr_space, reg) \
    MOVP_sib_rptr( 2, reg, -1, (uintptr_t)addr_space, reg );
#define ADDP_imms_ptr(imm,p) \
    ADDL_imms_r32disp(imm, -1, (uintptr_t)p);
#endif
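
/* A rough reading of the macros above, inferred from their arguments
 * (xlat/x86/x86op.h holds the authoritative encodings): XLAT(addr_space,reg)
 * emits a single scaled-index load equivalent to
 *     reg = (uintptr_t)addr_space[reg];
 * (scale 8 via REG_RAX on amd64, scale 4 with an absolute base on x86),
 * while ADDP_imms_ptr(imm,p) emits an in-memory add equivalent to
 *     *(uint32_t *)p += imm;
 */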
void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;
    struct mem_region_fn **addr_space;
    uint8_t **out = (uint8_t **)&page->fn;
    uint8_t **fn;
    int inc = writable ? 1 : 2;
    int i;

    xlat_output = page->code;
    if( (ppn & 0x1FFFFFFF) >= 0x1C000000 ) {
        /* SH4 control region */
        ppn |= 0xE0000000;
        addr_space = sh4_address_space;
    } else {
        addr_space = ext_address_space;
    }
    fn = (uint8_t **)addr_space[ppn>>12];
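
    /* Each iteration below emits one stub per mem_region_fn slot.  For a
     * read-only page, inc==2 generates only the read-side entries (the
     * even slots), leaving the write slots untouched (presumably pointing
     * at write-fault handlers installed elsewhere).  The trailing numeric
     * comments appear to be the byte counts of the emitted instructions.
     */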
    for( i=0; i<10; i+= inc, fn += inc, out += inc ) {
        *out = xlat_output;
        if( i != 9 ) { /* read_byte_for_write doesn't increment mmu_urc, everything else does */
            ADDP_imms_ptr(1, &mmu_urc);
        }
        ADDL_imms_r32( ppn-vpn, ARG1 ); // 6
        if( ent->mask >= 0xFFFFF000 ) {
            // Maps to a single page, so jump directly there
            int rel = (*fn - xlat_output);
            JMP_prerel( rel ); // 5
        } else {
            MOVL_r32_r32( ARG1, REG_ECX ); // 2
            SHRL_imm_r32( 12, REG_ECX );  // 3
            XLAT(addr_space, REG_ECX); // 14
            JMP_r32disp(REG_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
        }
    }

    page->fn.prefetch = unmapped_prefetch; // FIXME
}
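
/* Reconstructed sketch (assuming the macro expansions above, on amd64,
 * with ARG1 == %rdi) of one generated stub for the general multi-page case:
 *
 *     mov  $&mmu_urc, %rax
 *     addl $1, (%rax)            ; ADDP_imms_ptr: bump URC counter
 *     add  $(ppn-vpn), %edi      ; rebase virtual address to physical
 *     mov  %edi, %ecx
 *     shr  $12, %ecx             ; physical page number
 *     mov  $addr_space, %rax
 *     mov  (%rax,%rcx,8), %rcx   ; XLAT: rcx = addr_space[page]
 *     jmp  *disp(%rcx)           ; tail-call the region's handler slot
 */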
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;

    xlat_output = page->code;

    memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );

    page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
    ADDP_imms_ptr(1, &mmu_urc);
    ADDL_imms_r32( ppn-vpn, ARG1 );
    int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
    JMP_prerel( rel );
}
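
/* Only the prefetch slot needs a generated stub here: every other
 * store-queue operation is copied verbatim from p4_region_storequeue,
 * and the stub itself just counts the access in mmu_urc, rebases the
 * address by ppn-vpn, and tail-jumps into the generic
 * ccn_storequeue_prefetch_tlb handler.
 */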
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    xlat_output = entry->code;
    int i;
    uint8_t **out = (uint8_t **)&entry->fn;

    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOVL_r32_r32( ARG1, REG_ECX );
        SHRL_imm_r32( 10, REG_ECX );
        ANDL_imms_r32( 0x3, REG_ECX );
        XLAT( (uintptr_t)&entry->subpages[0], REG_ECX );
        JMP_r32disp(REG_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
    }

    out = (uint8_t **)&entry->user_fn;
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOVL_r32_r32( ARG1, REG_ECX );
        SHRL_imm_r32( 10, REG_ECX );
        ANDL_imms_r32( 0x3, REG_ECX );
        XLAT( (uintptr_t)&entry->user_subpages[0], REG_ECX );
        JMP_r32disp(REG_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
    }
}
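
/* For 1K pages the target cannot be resolved when the stub is built, so
 * each of the nine slots dispatches at runtime: bits 10-11 of the address
 * (SHR 10, AND 3) select one of the four 1K subpages, and the same
 * XLAT-plus-indirect-jump pattern forwards to that subpage's handler -
 * once for the privileged table and once for the user table.
 */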