lxdream.org :: lxdream/src/sh4/mmux86.c
filename src/sh4/mmux86.c
changeset 1292:799fdd4f704a
prev 1067:d3c00ffccfcd
author nkeynes
date Mon Oct 15 21:19:22 2012 +1000 (11 years ago)
permissions -rw-r--r--
last change Merge workaround for interpreted mode accesses to the OC ram regions in AT=0
cases. Still broken for other cases
/**
 * $Id$
 *
 * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
 *
 * Copyright (c) 2008 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
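
/*
 * Rough picture of what the generated stubs do, as read from the code below
 * (details of struct mem_region_fn's slot layout are assumed, not verified
 * here): mmu_utlb_init_vtable() fills page->fn with pointers into page->code,
 * one freshly emitted stub per slot. Each stub bumps the mmu_urc counter
 * (except read_byte_for_write), rebases the incoming address in REG_ARG1 by
 * (ppn - vpn), and then either jumps straight to the underlying handler for
 * single-page mappings or re-indexes the address-space table at run time.
 */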

#include "lxdream.h"
#include "mem.h"
#include "sh4/sh4core.h"
#include "sh4/sh4mmio.h"
#include "sh4/sh4trans.h"
#include "sh4/mmu.h"
#include "xlat/x86/x86op.h"

#if SIZEOF_VOID_P == 8
#define XLAT(addr_space, reg) \
    MOVQ_imm64_r64( (uintptr_t)addr_space, REG_RAX ); \
    MOVP_sib_rptr( 3, reg, REG_RAX, 0, reg );
#define ADDP_imms_ptr(imm,p) \
    MOVQ_imm64_r64((uintptr_t)p, REG_EAX ); \
    ADDL_imms_r32disp(imm, REG_EAX, 0);
#else
#define XLAT(addr_space, reg) \
    MOVP_sib_rptr( 2, reg, -1, (uintptr_t)addr_space, reg );
#define ADDP_imms_ptr(imm,p) \
    ADDL_imms_r32disp(imm, -1, (uintptr_t)p);
#endif
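
/*
 * Roughly, the C-level equivalents of the two emitter macros above are
 * (the exact x86op.h encodings are taken on trust here):
 *
 *   XLAT(addr_space, reg):    reg = (uintptr_t)addr_space[reg];
 *       a scaled table load. The 64-bit variant first loads the table base
 *       into RAX, since a 64-bit address cannot be encoded as a 32-bit
 *       displacement, and uses scale 8 (shift 3) for pointer-sized entries;
 *       the 32-bit variant uses an absolute displacement with scale 4.
 *
 *   ADDP_imms_ptr(imm, p):    *(uint32_t *)p += imm;
 *       an in-memory add to a 32-bit counter at a fixed address, again
 *       materializing the address in a register first on 64-bit.
 */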

void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;
    struct mem_region_fn **addr_space;
    uint8_t **out = (uint8_t **)&page->fn;
    uint8_t **fn;
    int inc = writable ? 1 : 2;
    int i;

    xlat_output = page->code;
    if( (ppn & 0x1FFFFFFF) >= 0x1C000000 ) {
        /* SH4 control region */
        ppn |= 0xE0000000;
        addr_space = sh4_address_space;
    } else {
        addr_space = ext_address_space;
    }
    fn = (uint8_t **)addr_space[ppn>>12];
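
    /* Emit one stub per mem_region_fn slot (10 in all), stepping by 2 to
     * skip the write entries when the mapping is not writable. Slot 9 is
     * read_byte_for_write, which deliberately does not touch mmu_urc. The
     * numeric trailing comments below appear to track the size in bytes of
     * each emitted instruction sequence. */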
    for( i=0; i<10; i+= inc, fn += inc, out += inc ) {
        *out = xlat_output;
        if( i != 9 ) { /* read_byte_for_write doesn't increment mmu_urc, everything else does */
            ADDP_imms_ptr(1, &mmu_urc);
        }
        ADDL_imms_r32( ppn-vpn, REG_ARG1 ); // 6
        if( ent->mask >= 0xFFFFF000 ) {
            // Maps to a single page, so jump directly there
            int rel = (*fn - xlat_output);
            JMP_prerel( rel ); // 5
        } else {
            MOVL_r32_r32( REG_ARG1, REG_CALLPTR ); // 2
            SHRL_imm_r32( 12, REG_CALLPTR ); // 3
            XLAT(addr_space, REG_CALLPTR); // 14
            JMP_r32disp(REG_CALLPTR, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
        }
    }

    page->fn.prefetch = unmapped_prefetch; // FIXME
}
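
/*
 * Store-queue pages reuse the generic p4_region_storequeue vtable wholesale
 * and only replace the prefetch entry with a generated stub: bump mmu_urc,
 * rebase the address by (ppn - vpn), then tail-jump into
 * ccn_storequeue_prefetch_tlb, which presumably performs the actual
 * store-queue write-back through the TLB mapping.
 */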
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;
    uint32_t ppn = ent->ppn & mask;

    xlat_output = page->code;

    memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );

    page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
    ADDP_imms_ptr(1, &mmu_urc);
    ADDL_imms_r32( ppn-vpn, REG_ARG1 );
    int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
    JMP_prerel( rel );
}
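
/*
 * 1K-page TLB entries cannot be resolved by the 4K-granularity address-space
 * table alone, so these stubs pick one of the four 1K subpages at run time:
 * take bits 10..11 of the address ((addr >> 10) & 3), use them to index
 * entry->subpages[] (or entry->user_subpages[] for the user-mode table), and
 * jump to the slot of the selected subpage vtable that corresponds to the
 * slot currently being generated.
 */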
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    xlat_output = entry->code;
    int i;
    uint8_t **out = (uint8_t **)&entry->fn;

    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOVL_r32_r32( REG_ARG1, REG_CALLPTR );
        SHRL_imm_r32( 10, REG_CALLPTR );
        ANDL_imms_r32( 0x3, REG_CALLPTR );
        XLAT( (uintptr_t)&entry->subpages[0], REG_CALLPTR );
        JMP_r32disp(REG_CALLPTR, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
    }

    out = (uint8_t **)&entry->user_fn;
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOVL_r32_r32( REG_ARG1, REG_CALLPTR );
        SHRL_imm_r32( 10, REG_CALLPTR );
        ANDL_imms_r32( 0x3, REG_CALLPTR );
        XLAT( (uintptr_t)&entry->user_subpages[0], REG_CALLPTR );
        JMP_r32disp(REG_CALLPTR, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
    }
}