Search
lxdream.org :: lxdream/src/sh4/sh4core.h
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4core.h
changeset 934:3acd3b3ee6d1
prev 931:430048ea8b71
next 939:6f2302afeb89
author nkeynes
date Sat Dec 27 02:59:35 2008 +0000 (15 years ago)
branch lxdream-mem
permissions -rw-r--r--
last change Replace fpscr_mask/fpscr flags in xlat_cache_block with a single xlat_sh4_mode,
which tracks the field of the same name in sh4r - actually a little faster this way.
Now depends on SR.MD, FPSCR.PR and FPSCR.SZ (although it doesn't benefit from the SR
flag yet).

Also fixed the failure to check the flags in the common case (code address returned
by previous block) which took away the performance benefits, but oh well.
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * This file defines the internal functions exported/used by the SH4 core, 
     5  * except for disassembly functions defined in sh4dasm.h
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #ifndef lxdream_sh4core_H
    21 #define lxdream_sh4core_H 1
    23 #include <glib/gtypes.h>
    24 #include <stdint.h>
    25 #include <stdio.h>
    26 #include "mem.h"
    27 #include "sh4/sh4.h"
    29 #ifdef __cplusplus
    30 extern "C" {
    31 #endif
/* Breakpoint data structure */
extern struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS]; /* Active breakpoint table (fixed capacity MAX_BREAKPOINTS) */
extern int sh4_breakpoint_count;  /* Number of valid entries in sh4_breakpoints */
extern gboolean sh4_starting;     /* NOTE(review): presumably TRUE while a run is starting up — confirm semantics in sh4.c */
/**
 * Cached direct pointer to the current instruction page. If AT is on, this
 * is derived from the ITLB, otherwise this will be the entire memory region.
 * This is actually a fairly useful optimization, as we can make a lot of
 * assumptions about the "current page" that we can't make in general for
 * arbitrary virtual addresses.
 */
struct sh4_icache_struct {
    sh4ptr_t page; // Page pointer (NULL if no page)
    sh4vma_t page_vma; // virtual address of the page.
    sh4addr_t page_ppa; // physical address of the page
    uint32_t mask;  // page mask: address bits that select the page (used by IS_IN_ICACHE below)
};
/* The single global icache entry consulted by the IS_IN_ICACHE/GET_ICACHE_* macros */
extern struct sh4_icache_struct sh4_icache;
/* Current SH4 address-space dispatch table — NOTE(review): indexing scheme defined elsewhere; confirm in mmu.c */
extern struct mem_region_fn **sh4_address_space;
/**
 * Test if a given address is contained in the current icache entry.
 * Only meaningful while sh4_icache describes a live entry (page != NULL).
 */
#define IS_IN_ICACHE(addr) (sh4_icache.page_vma == ((addr) & sh4_icache.mask))
/**
 * Return a pointer for the given vma, under the assumption that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PTR(addr) (sh4_icache.page + ((addr)-sh4_icache.page_vma))
/**
 * Return the physical (external) address for the given vma, assuming that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PHYS(addr) (sh4_icache.page_ppa + ((addr)-sh4_icache.page_vma))

/**
 * Return the virtual (vma) address for the first address past the end of the
 * cache entry: page_vma + page size, since (~mask)+1 is the page size in bytes.
 * Assumes that there is in fact a current icache entry.
 */
#define GET_ICACHE_END() (sh4_icache.page_vma + (~sh4_icache.mask) + 1)
/**
 * SH4 vm-exit flag - exit the current block but continue (eg exception handling)
 */
#define CORE_EXIT_CONTINUE 1

/**
 * SH4 vm-exit flag - exit the current block and halt immediately (eg fatal error)
 */
#define CORE_EXIT_HALT 2

/**
 * SH4 vm-exit flag - exit the current block and halt immediately for a system
 * breakpoint.
 */
#define CORE_EXIT_BREAKPOINT 3

/**
 * SH4 vm-exit flag - exit the current block and continue after performing a full
 * system reset (dreamcast_reset())
 */
#define CORE_EXIT_SYSRESET 4

/**
 * SH4 vm-exit flag - exit the current block and continue after the next IRQ.
 */
#define CORE_EXIT_SLEEP 5

/**
 * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
 * if address translation has changed)
 */
#define CORE_EXIT_FLUSH_ICACHE 6

/* Signature shared by the run-slice entry points declared below
 * (sh4_run_slice, sh4_xlat_run_slice, sh4_sleep_run_slice): takes the
 * slice length in nanoseconds and returns a uint32_t — presumably the
 * time actually executed; confirm in implementation. */
typedef uint32_t (*sh4_run_slice_fn)(uint32_t);
/* SH4 module functions — top-level lifecycle control for the CPU core */
void sh4_init( void );   /* One-time module initialization */
void sh4_reset( void );  /* Reset CPU state (cf. EXC_POWER_RESET/EXC_MANUAL_RESET below) */
void sh4_run( void );
void sh4_stop( void );
uint32_t sh4_run_slice( uint32_t nanos ); // Run single timeslice using emulator
uint32_t sh4_xlat_run_slice( uint32_t nanos ); // Run single timeslice using translator
uint32_t sh4_sleep_run_slice( uint32_t nanos ); // Run single timeslice while the CPU is asleep
/**
 * Immediately exit from the currently executing instruction with the given
 * exit code (one of the CORE_EXIT_* constants above). This method does not
 * return.
 */
void sh4_core_exit( int exit_code );

/**
 * Exit the current block at the end of the current instruction, flush the
 * translation cache (completely) and return control to sh4_xlat_run_slice.
 *
 * As a special case, if the current instruction is actually the last 
 * instruction in the block (ie it's in a delay slot), this function 
 * returns to allow normal completion of the translation block. Otherwise
 * this function never returns.
 *
 * Must only be invoked (indirectly) from within translated code.
 */
void sh4_flush_icache(); /* NOTE(review): empty parens declare unspecified args in C — (void) would be stricter */
/* SH4 peripheral module functions. Each on-chip module follows the same
 * pattern: *_reset reinitializes it, *_run_slice advances it by a time
 * slice, and *_save_state/*_load_state serialize it to/from a FILE
 * (the int return of *_load_state presumably signals success/failure —
 * confirm per module). */

/* CPG: clock pulse generator */
void CPG_reset( void );
/* DMAC: DMA controller */
void DMAC_reset( void );
void DMAC_run_slice( uint32_t );
void DMAC_save_state( FILE * );
int DMAC_load_state( FILE * );
/* INTC: interrupt controller */
void INTC_reset( void );
void INTC_save_state( FILE *f );
int INTC_load_state( FILE *f );
/* MMU */
void MMU_reset( void );
void MMU_save_state( FILE *f );
int MMU_load_state( FILE *f );
void MMU_ldtlb(); /* LDTLB instruction support. NOTE(review): empty parens — (void) would be stricter */
/* CCN: cache controller */
void CCN_save_state( FILE *f );
int CCN_load_state( FILE *f );
/* SCIF: serial interface */
void SCIF_reset( void );
void SCIF_run_slice( uint32_t );
void SCIF_save_state( FILE *f );
int SCIF_load_state( FILE *f );
void SCIF_update_line_speed(void);
/* TMU: timer unit */
void TMU_init( void );
void TMU_reset( void );
void TMU_run_slice( uint32_t );
void TMU_save_state( FILE * );
int TMU_load_state( FILE * );
void TMU_update_clocks( void );
/* PMM: performance monitor */
void PMM_reset( void );
void PMM_write_control( int, uint32_t );
void PMM_save_state( FILE * );
int PMM_load_state( FILE * );
uint32_t PMM_run_slice( uint32_t );
/* Top-level run-slice entry points (match sh4_run_slice_fn) */
uint32_t sh4_translate_run_slice(uint32_t);
uint32_t sh4_emulate_run_slice(uint32_t);
/* SH4 instruction support methods */
mem_region_fn_t FASTCALL sh7750_decode_address( sh4addr_t address );
void FASTCALL sh7750_decode_address_copy( sh4addr_t address, mem_region_fn_t result );
void FASTCALL sh4_sleep( void );  /* SLEEP instruction */
void FASTCALL sh4_fsca( uint32_t angle, float *fr );  /* FSCA instruction: sine/cosine approximation — output layout in *fr to be confirmed in implementation */
void FASTCALL sh4_ftrv( float *fv );  /* FTRV instruction: vector transform — presumably applies XMTRX to fv; confirm */
uint32_t FASTCALL sh4_read_sr(void);  /* Read SR — presumably folds the separately-kept M/Q/S/T flags back in (cf. SR_MQSTMASK below) */
void FASTCALL sh4_write_sr(uint32_t val);
void FASTCALL sh4_write_fpscr(uint32_t val);
void FASTCALL sh4_switch_fr_banks(void);  /* Swap FR/XF register banks (FPSCR.FR change) */
void FASTCALL signsat48(void);  /* 48-bit sign saturation — presumably for MAC with SR.S set; confirm */
gboolean sh4_has_page( sh4vma_t vma );
/* SH4 Memory */
/* Sentinel returned by the mmu_vma_to_phys_* functions on failure. Out of
 * band with respect to their documented return ranges
 * (0x00000000-0x1FFFFFFF external, 0xE0000000-0xFFFFFFFF P4). */
#define MMU_VMA_ERROR 0x80000000
/**
 * Update the sh4_icache structure to contain the specified vma. If the vma
 * cannot be resolved, an MMU exception is raised and the function returns
 * FALSE. Otherwise, returns TRUE and updates sh4_icache accordingly.
 * Note: If the vma resolves to a non-memory area, sh4_icache will be 
 * invalidated, but the function will still return TRUE.
 * @return FALSE if an MMU exception was raised, otherwise TRUE.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr );
/**
 * Resolve a virtual address through the TLB for a read operation, returning 
 * the resultant P4 or external address. If the resolution fails, the 
 * appropriate MMU exception is raised and the value MMU_VMA_ERROR is returned.
 * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
 * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
 */
#ifdef HAVE_FRAME_ADDRESS
/* The extra exc argument exists only when frame addresses are available —
 * presumably a return/unwind address used when an exception is raised;
 * confirm in mmu.c. */
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc );
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc );
#else
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr );
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr );
#endif
/* Variant for the disassembler — presumably side-effect free (no exception raised); confirm */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t addr );
/* Memory accessors operating on SH4 addresses. Byte/word reads return
 * int32_t — presumably sign-extended to 32 bits; confirm in implementation. */
int64_t FASTCALL sh4_read_quad( sh4addr_t addr );
int32_t FASTCALL sh4_read_long( sh4addr_t addr );
int32_t FASTCALL sh4_read_word( sh4addr_t addr );
int32_t FASTCALL sh4_read_byte( sh4addr_t addr );
/* NOTE(review): read_quad returns int64_t but write_quad takes uint64_t — inconsistent signedness */
void FASTCALL sh4_write_quad( sh4addr_t addr, uint64_t val );
void FASTCALL sh4_write_long( sh4addr_t addr, uint32_t val );
void FASTCALL sh4_write_word( sh4addr_t addr, uint32_t val );
void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
int32_t sh4_read_phys_word( sh4addr_t addr ); /* NOTE(review): only accessor without FASTCALL — intentional? */
void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr ); /* MMU-aware variant; FALSE presumably means an MMU exception was raised — confirm */
/* SH4 Exceptions — exception event codes (cf. EXPEVT in the SH-4 manual) */
#define EXC_POWER_RESET     0x000 /* reset vector */
#define EXC_MANUAL_RESET    0x020 /* reset vector */
#define EXC_TLB_MISS_READ   0x040 /* TLB vector */
#define EXC_TLB_MISS_WRITE  0x060 /* TLB vector */
#define EXC_INIT_PAGE_WRITE 0x080
#define EXC_TLB_PROT_READ   0x0A0
#define EXC_TLB_PROT_WRITE  0x0C0
#define EXC_DATA_ADDR_READ  0x0E0
#define EXC_DATA_ADDR_WRITE 0x100
#define EXC_TLB_MULTI_HIT   0x140
#define EXC_SLOT_ILLEGAL    0x1A0 /* note: listed out of numeric order with the two below */
#define EXC_ILLEGAL         0x180
#define EXC_TRAP            0x160
#define EXC_FPU_DISABLED    0x800
#define EXC_SLOT_FPU_DISABLED 0x820

/* Handler entry points, as offsets from VBR */
#define EXV_EXCEPTION    0x100  /* General exception vector */
#define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
#define EXV_INTERRUPT    0x600  /* External interrupt vector */

/* Raise the given exception/reset/trap against the current CPU state.
 * NOTE(review): gboolean result semantics to be confirmed in sh4core — presumably TRUE when the exception was taken. */
gboolean FASTCALL sh4_raise_exception( int );
gboolean FASTCALL sh4_raise_reset( int );
gboolean FASTCALL sh4_raise_trap( int );
gboolean FASTCALL sh4_raise_slot_exception( int, int );
gboolean FASTCALL sh4_raise_tlb_exception( int );
void FASTCALL sh4_accept_interrupt( void );
/* Status Register (SR) bits */
#define SR_MD    0x40000000 /* Processor mode ( User=0, Privileged=1 ) */ 
#define SR_RB    0x20000000 /* Register bank (privileged mode only) */
#define SR_BL    0x10000000 /* Exception/interrupt block (1 = masked) */
#define SR_FD    0x00008000 /* FPU disable */
#define SR_M     0x00000200 /* Divide-step state flag M (DIV0S/DIV0U/DIV1) */
#define SR_Q     0x00000100 /* Divide-step state flag Q (DIV0S/DIV0U/DIV1) */
#define SR_IMASK 0x000000F0 /* Interrupt mask level */
#define SR_S     0x00000002 /* Saturation operation for MAC instructions */
#define SR_T     0x00000001 /* True/false or carry/borrow */
#define SR_MASK  0x700083F3 /* All defined SR bits (writable mask) */
#define SR_MQSTMASK 0xFFFFFCFC /* Mask to clear the flags we're keeping separately */
#define SR_MDRB  0x60000000 /* MD+RB mask for convenience */

#define IS_SH4_PRIVMODE() (sh4r.sr&SR_MD)
#define SH4_INTMASK() ((sh4r.sr&SR_IMASK)>>4)
/* An event is due within the current slice and we're not mid-delay-slot */
#define SH4_EVENT_PENDING() (sh4r.event_pending <= sh4r.slice_cycle && !sh4r.in_delay_slot)
/* Floating-point Status/Control Register (FPSCR) bits */
#define FPSCR_FR     0x00200000 /* FPU register bank */
#define FPSCR_SZ     0x00100000 /* FPU transfer size (0=32 bits, 1=64 bits) */
#define FPSCR_PR     0x00080000 /* Precision (0=32 bits, 1=64 bits) */
#define FPSCR_DN     0x00040000 /* Denormalization mode (1 = treat as 0) */
#define FPSCR_CAUSE  0x0003F000 /* FPU exception cause field */
#define FPSCR_ENABLE 0x00000F80 /* FPU exception enable field */
#define FPSCR_FLAG   0x0000007C /* FPU exception flag field */
#define FPSCR_RM     0x00000003 /* Rounding mode (0=nearest, 1=to zero) */
#define FPSCR_MASK   0x003FFFFF /* All defined FPSCR bits */

#define IS_FPU_DOUBLEPREC() (sh4r.fpscr&FPSCR_PR)
#define IS_FPU_DOUBLESIZE() (sh4r.fpscr&FPSCR_SZ)
#define IS_FPU_ENABLED() ((sh4r.sr&SR_FD)==0)
   287 #define FR(x) sh4r.fr[0][(x)^1]
   288 #define DRF(x) *((double *)&sh4r.fr[0][(x)<<1])
   289 #define XF(x) sh4r.fr[1][(x)^1]
   290 #define XDR(x) *((double *)&sh4r.fr[1][(x)<<1])
   291 #define DRb(x,b) *((double *)&sh4r.fr[b][(x)<<1])
   292 #define DR(x) *((double *)&sh4r.fr[x&1][x&0x0E])
   293 #define FPULf    (sh4r.fpul.f)
   294 #define FPULi    (sh4r.fpul.i)
   296 #define SH4_WRITE_STORE_QUEUE(addr,val) sh4r.store_queue[(addr>>2)&0xF] = val;
   298 #ifdef __cplusplus
   299 }
   300 #endif
   302 #endif /* !lxdream_sh4core_H */
.