Search
lxdream.org :: lxdream/src/sh4/sh4core.h
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4core.h
changeset 911:2f6ba75b84d1
prev905:4c17ebd9ef5e
next927:17b6b9e245d8
author nkeynes
date Thu Dec 11 23:26:03 2008 +0000 (15 years ago)
permissions -rw-r--r--
last change Disable the generational translation cache - I've got no evidence that it
actually helps performance, and it simplifies things to get rid of it (in
particular, translated code doesn't have to worry about being moved now).
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * This file defines the internal functions exported/used by the SH4 core, 
     5  * except for disassembly functions defined in sh4dasm.h
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #ifndef lxdream_sh4core_H
    21 #define lxdream_sh4core_H 1
    23 #include <glib/gtypes.h>
    24 #include <stdint.h>
    25 #include <stdio.h>
    26 #include "mem.h"
    27 #include "sh4/sh4.h"
    29 #ifdef __cplusplus
    30 extern "C" {
    31 #endif
/* Breakpoint data structure */
extern struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
extern int sh4_breakpoint_count;   /* Number of entries currently in use in sh4_breakpoints */
extern sh4ptr_t sh4_main_ram;      /* Direct host pointer to main system RAM */
extern gboolean sh4_starting;      /* NOTE(review): set around SH4 startup -- exact semantics defined in sh4.c; confirm there */
/**
 * Cached direct pointer to the current instruction page. If AT is on, this
 * is derived from the ITLB, otherwise this will be the entire memory region.
 * This is actually a fairly useful optimization, as we can make a lot of
 * assumptions about the "current page" that we can't make in general for
 * arbitrary virtual addresses.
 */
struct sh4_icache_struct {
    sh4ptr_t page; // Page pointer (NULL if no page)
    sh4vma_t page_vma; // virtual address of the page.
    sh4addr_t page_ppa; // physical address of the page
    uint32_t mask;  // page mask: (vma & mask) == page_vma for addresses inside the page
};
/* The single global icache-page entry used by the macros below. */
extern struct sh4_icache_struct sh4_icache;
/**
 * Test if a given address is contained in the current icache entry.
 * Only meaningful when an entry is actually loaded (sh4_icache.page != NULL).
 */
#define IS_IN_ICACHE(addr) (sh4_icache.page_vma == ((addr) & sh4_icache.mask))
/**
 * Return a (host) pointer for the given vma, under the assumption that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PTR(addr) (sh4_icache.page + ((addr)-sh4_icache.page_vma))
/**
 * Return the physical (external) address for the given vma, assuming that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PHYS(addr) (sh4_icache.page_ppa + ((addr)-sh4_icache.page_vma))

/**
 * Return the virtual (vma) address for the first address past the end of the 
 * cache entry. Assumes that there is in fact a current icache entry.
 * (~mask + 1 is the page size in bytes.)
 */
#define GET_ICACHE_END() (sh4_icache.page_vma + (~sh4_icache.mask) + 1)
/**
 * SH4 vm-exit flag - exit the current block but continue (eg exception handling)
 */
#define CORE_EXIT_CONTINUE 1

/**
 * SH4 vm-exit flag - exit the current block and halt immediately (eg fatal error)
 */
#define CORE_EXIT_HALT 2

/**
 * SH4 vm-exit flag - exit the current block and halt immediately for a system
 * breakpoint.
 */
#define CORE_EXIT_BREAKPOINT 3

/**
 * SH4 vm-exit flag - exit the current block and continue after performing a full
 * system reset (dreamcast_reset())
 */
#define CORE_EXIT_SYSRESET 4

/**
 * SH4 vm-exit flag - exit the current block and continue after the next IRQ.
 */
#define CORE_EXIT_SLEEP 5

/**
 * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
 * if address translation has changed)
 */
#define CORE_EXIT_FLUSH_ICACHE 6

/* Common signature of the run-slice implementations declared below
 * (sh4_run_slice, sh4_xlat_run_slice, ...): takes the slice length in
 * nanoseconds; the meaning of the return value is defined by those
 * implementations -- presumably time actually executed; confirm in sh4.c. */
typedef uint32_t (*sh4_run_slice_fn)(uint32_t);
/* SH4 module lifecycle functions */
void sh4_init( void );    // One-time module initialization
void sh4_reset( void );   // Reset the SH4 module
void sh4_run( void );
void sh4_stop( void );
uint32_t sh4_run_slice( uint32_t nanos ); // Run single timeslice using emulator
uint32_t sh4_xlat_run_slice( uint32_t nanos ); // Run single timeslice using translator
uint32_t sh4_sleep_run_slice( uint32_t nanos ); // Run single timeslice while the CPU is asleep
   120 /**
   121  * Immediately exit from the currently executing instruction with the given
   122  * exit code. This method does not return.
   123  */
   124 void sh4_core_exit( int exit_code );
   126 /**
   127  * Exit the current block at the end of the current instruction, flush the
   128  * translation cache (completely) and return control to sh4_xlat_run_slice.
   129  *
   130  * As a special case, if the current instruction is actually the last 
   131  * instruction in the block (ie it's in a delay slot), this function 
   132  * returns to allow normal completion of the translation block. Otherwise
   133  * this function never returns.
   134  *
   135  * Must only be invoked (indirectly) from within translated code.
   136  */
   137 void sh4_flush_icache();
   139 /* SH4 peripheral module functions */
   140 void CPG_reset( void );
   141 void DMAC_reset( void );
   142 void DMAC_run_slice( uint32_t );
   143 void DMAC_save_state( FILE * );
   144 int DMAC_load_state( FILE * );
   145 void INTC_reset( void );
   146 void INTC_save_state( FILE *f );
   147 int INTC_load_state( FILE *f );
   148 void MMU_init( void );
   149 void MMU_reset( void );
   150 void MMU_save_state( FILE *f );
   151 int MMU_load_state( FILE *f );
   152 void MMU_ldtlb();
   153 void SCIF_reset( void );
   154 void SCIF_run_slice( uint32_t );
   155 void SCIF_save_state( FILE *f );
   156 int SCIF_load_state( FILE *f );
   157 void SCIF_update_line_speed(void);
   158 void TMU_init( void );
   159 void TMU_reset( void );
   160 void TMU_run_slice( uint32_t );
   161 void TMU_save_state( FILE * );
   162 int TMU_load_state( FILE * );
   163 void TMU_update_clocks( void );
   164 void PMM_reset( void );
   165 void PMM_write_control( int, uint32_t );
   166 void PMM_save_state( FILE * );
   167 int PMM_load_state( FILE * );
   168 uint32_t PMM_run_slice( uint32_t );
   169 uint32_t sh4_translate_run_slice(uint32_t);
   170 uint32_t sh4_emulate_run_slice(uint32_t);
/* SH4 instruction support methods */
void FASTCALL sh4_sleep( void );
void FASTCALL sh4_fsca( uint32_t angle, float *fr );  /* FSCA helper: results written through fr -- exact layout in implementation */
void FASTCALL sh4_ftrv( float *fv );                  /* FTRV helper: operates in-place on fv */
uint32_t FASTCALL sh4_read_sr(void);                  /* Assemble the full SR value from its separately-kept flags */
void FASTCALL sh4_write_sr(uint32_t val);
void FASTCALL sh4_write_fpscr(uint32_t val);
void FASTCALL sh4_switch_fr_banks(void);
void FASTCALL signsat48(void);                        /* NOTE(review): name suggests 48-bit sign saturation (cf. SR_S) -- confirm in implementation */
gboolean sh4_has_page( sh4vma_t vma );
/* SH4 Memory */
#define MMU_VMA_ERROR 0x80000000 /* Sentinel returned by the mmu_vma_to_phys_* functions on failure */
/**
 * Update the sh4_icache structure to contain the specified vma. If the vma
 * cannot be resolved, an MMU exception is raised and the function returns
 * FALSE. Otherwise, returns TRUE and updates sh4_icache accordingly.
 * Note: If the vma resolves to a non-memory area, sh4_icache will be 
 * invalidated, but the function will still return TRUE.
 * @return FALSE if an MMU exception was raised, otherwise TRUE.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr );

/**
 * Resolve a virtual address through the TLB for a read operation, returning 
 * the resultant P4 or external address. If the resolution fails, the 
 * appropriate MMU exception is raised and the value MMU_VMA_ERROR is returned.
 * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
 * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr );
/* As above, for a write operation. */
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr );
/* As above, for the disassembler -- presumably without raising exceptions; confirm. */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t addr );
/* Memory accessors. Reads return the value widened into a signed type. */
int64_t FASTCALL sh4_read_quad( sh4addr_t addr );
int32_t FASTCALL sh4_read_long( sh4addr_t addr );
int32_t FASTCALL sh4_read_word( sh4addr_t addr );
int32_t FASTCALL sh4_read_byte( sh4addr_t addr );
void FASTCALL sh4_write_quad( sh4addr_t addr, uint64_t val );
void FASTCALL sh4_write_long( sh4addr_t addr, uint32_t val );
void FASTCALL sh4_write_word( sh4addr_t addr, uint32_t val );
void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
int32_t sh4_read_phys_word( sh4addr_t addr ); /* NOTE(review): lacks FASTCALL unlike its siblings -- confirm this matches the definition */
void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr );
   218 /* SH4 Exceptions */
   219 #define EXC_POWER_RESET     0x000 /* reset vector */
   220 #define EXC_MANUAL_RESET    0x020 /* reset vector */
   221 #define EXC_TLB_MISS_READ   0x040 /* TLB vector */
   222 #define EXC_TLB_MISS_WRITE  0x060 /* TLB vector */
   223 #define EXC_INIT_PAGE_WRITE 0x080
   224 #define EXC_TLB_PROT_READ   0x0A0
   225 #define EXC_TLB_PROT_WRITE  0x0C0
   226 #define EXC_DATA_ADDR_READ  0x0E0
   227 #define EXC_DATA_ADDR_WRITE 0x100
   228 #define EXC_TLB_MULTI_HIT   0x140
   229 #define EXC_SLOT_ILLEGAL    0x1A0
   230 #define EXC_ILLEGAL         0x180
   231 #define EXC_TRAP            0x160
   232 #define EXC_FPU_DISABLED    0x800
   233 #define EXC_SLOT_FPU_DISABLED 0x820
   235 #define EXV_EXCEPTION    0x100  /* General exception vector */
   236 #define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
   237 #define EXV_INTERRUPT    0x600  /* External interrupt vector */
/* Exception-raising entry points (take EXC_* codes above). Each returns
 * gboolean -- presumably TRUE if the exception was taken; confirm against
 * the sh4core implementation. */
gboolean FASTCALL sh4_raise_exception( int );
gboolean FASTCALL sh4_raise_reset( int );
gboolean FASTCALL sh4_raise_trap( int );
gboolean FASTCALL sh4_raise_slot_exception( int, int );
gboolean FASTCALL sh4_raise_tlb_exception( int );
void FASTCALL sh4_accept_interrupt( void );
   246 #define SIGNEXT4(n) ((((int32_t)(n))<<28)>>28)
   247 #define SIGNEXT8(n) ((int32_t)((int8_t)(n)))
   248 #define SIGNEXT12(n) ((((int32_t)(n))<<20)>>20)
   249 #define SIGNEXT16(n) ((int32_t)((int16_t)(n)))
   250 #define SIGNEXT32(n) ((int64_t)((int32_t)(n)))
   251 #define SIGNEXT48(n) ((((int64_t)(n))<<16)>>16)
   252 #define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
/* Status Register (SR) bits */
#define SR_MD    0x40000000 /* Processor mode ( User=0, Privileged=1 ) */ 
#define SR_RB    0x20000000 /* Register bank (privileged mode only) */
#define SR_BL    0x10000000 /* Exception/interrupt block (1 = masked) */
#define SR_FD    0x00008000 /* FPU disable */
#define SR_M     0x00000200 /* M flag -- divide-step state; see SH4 manual */
#define SR_Q     0x00000100 /* Q flag -- divide-step state; see SH4 manual */
#define SR_IMASK 0x000000F0 /* Interrupt mask level */
#define SR_S     0x00000002 /* Saturation operation for MAC instructions */
#define SR_T     0x00000001 /* True/false or carry/borrow */
#define SR_MASK  0x700083F3 /* Presumably the set of valid SR bits -- confirm against sh4_write_sr */
#define SR_MQSTMASK 0xFFFFFCFC /* Mask to clear the flags we're keeping separately (M, Q, S, T) */
#define SR_MDRB  0x60000000 /* MD+RB mask for convenience */

#define IS_SH4_PRIVMODE() (sh4r.sr&SR_MD)
#define SH4_INTMASK() ((sh4r.sr&SR_IMASK)>>4)
/* An event is due once slice_cycle reaches event_pending, except while in a
 * delay slot (checked last so events are deferred past the slot). */
#define SH4_EVENT_PENDING() (sh4r.event_pending <= sh4r.slice_cycle && !sh4r.in_delay_slot)
/* Floating-point Status/Control Register (FPSCR) bits */
#define FPSCR_FR     0x00200000 /* FPU register bank */
#define FPSCR_SZ     0x00100000 /* FPU transfer size (0=32 bits, 1=64 bits) */
#define FPSCR_PR     0x00080000 /* Precision (0=32 bits, 1=64 bits) */
#define FPSCR_DN     0x00040000 /* Denormalization mode (1 = treat as 0) */
#define FPSCR_CAUSE  0x0003F000 /* Exception cause field */
#define FPSCR_ENABLE 0x00000F80 /* Exception enable field */
#define FPSCR_FLAG   0x0000007C /* Exception flag field */
#define FPSCR_RM     0x00000003 /* Rounding mode (0=nearest, 1=to zero) */
#define FPSCR_MASK   0x003FFFFF /* All bits defined above */

#define IS_FPU_DOUBLEPREC() (sh4r.fpscr&FPSCR_PR)
#define IS_FPU_DOUBLESIZE() (sh4r.fpscr&FPSCR_SZ)
/* FPU is enabled when the SR.FD (disable) bit is clear. */
#define IS_FPU_ENABLED() ((sh4r.sr&SR_FD)==0)
   286 #define FR(x) sh4r.fr[0][(x)^1]
   287 #define DRF(x) *((double *)&sh4r.fr[0][(x)<<1])
   288 #define XF(x) sh4r.fr[1][(x)^1]
   289 #define XDR(x) *((double *)&sh4r.fr[1][(x)<<1])
   290 #define DRb(x,b) *((double *)&sh4r.fr[b][(x)<<1])
   291 #define DR(x) *((double *)&sh4r.fr[x&1][x&0x0E])
   292 #define FPULf    (sh4r.fpul.f)
   293 #define FPULi    (sh4r.fpul.i)
   295 #define SH4_WRITE_STORE_QUEUE(addr,val) sh4r.store_queue[(addr>>2)&0xF] = val;
   297 #ifdef __cplusplus
   298 }
   299 #endif
   301 #endif /* !lxdream_sh4core_H */
.