Search
lxdream.org :: lxdream/src/sh4/mmux86.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/mmux86.c
changeset 972:fb948057cf08
prev 953:f4a156508ad1
next 975:007bf7eb944f
author nkeynes
date Mon Jan 26 03:05:54 2009 +0000 (13 years ago)
permissions -rw-r--r--
last change Fix TLB access to SH4 peripheral control regions
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
     5  *
     6  * Copyright (c) 2008 Nathan Keynes.
     7  *
     8  * This program is free software; you can redistribute it and/or modify
     9  * it under the terms of the GNU General Public License as published by
    10  * the Free Software Foundation; either version 2 of the License, or
    11  * (at your option) any later version.
    12  *
    13  * This program is distributed in the hope that it will be useful,
    14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    16  * GNU General Public License for more details.
    17  */
    19 #include "lxdream.h"
    20 #include "mem.h"
    21 #include "sh4/sh4core.h"
    22 #include "sh4/sh4mmio.h"
    23 #include "sh4/sh4trans.h"
    24 #include "sh4/mmu.h"
    25 #include "sh4/x86op.h"
#if SIZEOF_VOID_P == 8
/* x86-64 (SysV ABI): the first two integer arguments arrive in %rdi/%rsi. */
#define ARG1 R_EDI
#define ARG2 R_ESI
/* Emit code that loads addr_space[%rcx] into %rcx, i.e. fetches the
 * mem_region_fn table pointer for the page index currently in %ecx.
 * NOTE(review): `addr_space` is a local variable in the function where
 * DECODE() is expanded — the macro captures it by name. */
#define DECODE() \
    MOV_imm64_r32((uintptr_t)addr_space, R_EAX);     /* movq ptr, %rax */ \
    REXW(); OP(0x8B); OP(0x0C); OP(0xC8)                    /* movq [%rax + %rcx*8], %rcx */
#else
/* x86-32: arguments in %eax/%edx — presumably a regparm-style convention
 * used by the translator's generated code; confirm against sh4trans. */
#define ARG1 R_EAX
#define ARG2 R_EDX
/* 32-bit table lookup: mov addr_space[%ecx*4], %ecx */
#define DECODE() \
    MOV_r32disp32x4_r32( R_ECX, (uintptr_t)addr_space, R_ECX );
#endif
/**
 * Emit native indirection stubs for a UTLB entry, filling page->fn with
 * pointers to freshly generated code in page->code. Each stub increments
 * the URC counter, relocates the incoming virtual address to its physical
 * address (by adding ppn-vpn), and then either jumps straight to the
 * target region's handler (single-page mappings) or re-dispatches through
 * the address-space table (mappings larger than one 4K page).
 *
 * @param ent       UTLB entry being mapped (vpn/ppn/mask are read)
 * @param page      output: receives stub entry points and generated code
 * @param writable  if FALSE, step size 2 skips over the write entries in
 *                  the fn table, leaving those slots untouched
 */
void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;    /* virtual page base */
    uint32_t ppn = ent->ppn & mask;    /* physical page base */
    struct mem_region_fn **addr_space;
    uint8_t **out = (uint8_t **)&page->fn;  /* slots to fill with stub addresses */
    uint8_t **fn;                           /* corresponding target handlers */
    int inc = writable ? 1 : 2;        /* entries alternate read/write; RO skips writes */
    int i;

    xlat_output = page->code;          /* emit into this page's code buffer */
    if( (ppn & 0x1FFFFFFF) >= 0x1C000000 ) {
        /* SH4 control region */
        ppn |= 0xE0000000;
        addr_space = sh4_address_space;
    } else {
        addr_space = ext_address_space;
    }
    fn = (uint8_t **)addr_space[ppn>>12];   /* handler table for the target page */

    /* 9 slots in the fn table (assumed: read/write pairs for each access
     * size plus prefetch — TODO confirm against struct mem_region_fn). */
    for( i=0; i<9; i+= inc, fn += inc, out += inc ) {
        *out = xlat_output;            /* record this stub's entry point */
        /* Emit: increment mmu_urc (TLB replacement counter) on every access */
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
        OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
#else 
        OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
#endif
        ADD_imm32_r32( ppn-vpn, ARG1 ); // 6  -- relocate vaddr -> paddr
        if( ent->mask >= 0xFFFFF000 ) {
            // Maps to a single page, so jump directly there
            int rel = (*fn - xlat_output);
            JMP_rel( rel ); // 5
        } else {
            /* Mapping spans multiple 4K pages: recompute the page index at
             * run time and dispatch through the address-space table, using
             * the same slot offset as the stub currently being emitted. */
            MOV_r32_r32( ARG1, R_ECX ); // 2
            SHR_imm8_r32( 12, R_ECX );  // 3
            DECODE();                   // 14
            JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) );    // 3
        }
    }

    page->fn.prefetch = unmapped_prefetch; // FIXME
}
/**
 * Emit the vtable for a UTLB entry mapping the store-queue region. The
 * page inherits the standard store-queue handlers verbatim; only prefetch
 * gets a generated stub, which relocates the address by (ppn-vpn) and
 * tail-jumps to the TLB-aware store-queue prefetch handler.
 *
 * @param ent   UTLB entry being mapped (vpn/ppn/mask are read)
 * @param page  output page: fn table copied/patched, stub emitted in code
 */
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    uint32_t mask = ent->mask;
    uint32_t vpn = ent->vpn & mask;   /* virtual page base */
    uint32_t ppn = ent->ppn & mask;   /* physical page base */

    xlat_output = page->code;

    /* Start from the standard store-queue handler table */
    memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );

    /* TESTME: Does a PREF increment the URC counter? */
    page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
    ADD_imm32_r32( ppn-vpn, ARG1 );   /* relocate vaddr -> paddr */
    int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
    JMP_rel( rel );                   /* tail-jump to the real handler */
}
/**
 * Emit dispatch stubs for a 1K-page UTLB entry. 1K pages are finer than
 * the 4K granularity used elsewhere, so each slot of the entry's fn table
 * (and of the parallel user-mode table) gets a stub that extracts the
 * subpage index from address bits 10-11, loads the matching entry from
 * subpages[]/user_subpages[], and indirect-jumps to the same slot offset
 * in the selected subpage's handler table.
 *
 * @param entry  1K UTLB entry; its code buffer and both fn tables are
 *               filled in
 */
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    xlat_output = entry->code;
    int i;
    uint8_t **out = (uint8_t **)&entry->fn;

    /* Privileged-mode table: dispatch through entry->subpages[] */
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );      /* subpage index = addr bits 10-11 */
        AND_imm8s_r32( 0x3, R_ECX );
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8);                   /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
#endif                
        /* jump via the matching slot of the selected subpage's table */
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) );    // 3
    }

    /* User-mode table: identical shape, but through user_subpages[] */
    out = (uint8_t **)&entry->user_fn;
    for( i=0; i<9; i++, out++ ) {
        *out = xlat_output;
        MOV_r32_r32( ARG1, R_ECX );
        SHR_imm8_r32( 10, R_ECX );      /* subpage index = addr bits 10-11 */
        AND_imm8s_r32( 0x3, R_ECX );
#if SIZEOF_VOID_P == 8
        MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
        REXW(); OP(0x8B); OP(0x0C); OP(0xC8);                   /* movq [%rax + %rcx*8], %rcx */
#else
        MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
#endif                
        JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) );    // 3
    }
}
.