Search
lxdream.org :: lxdream/src/sh4/sh4core.h
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4core.h
changeset 939:6f2302afeb89
prev934:3acd3b3ee6d1
next945:787729653236
author nkeynes
date Sat Jan 03 08:55:15 2009 +0000 (15 years ago)
branchlxdream-mem
permissions -rw-r--r--
last change Implement CORE_EXIT_EXCEPTION for use when direct frame messing about doesn't work
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * This file defines the internal functions exported/used by the SH4 core, 
     5  * except for disassembly functions defined in sh4dasm.h
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #ifndef lxdream_sh4core_H
    21 #define lxdream_sh4core_H 1
    23 #include <glib/gtypes.h>
    24 #include <stdint.h>
    25 #include <stdio.h>
    26 #include "mem.h"
    27 #include "sh4/sh4.h"
    29 #ifdef __cplusplus
    30 extern "C" {
    31 #endif
    33 /* Breakpoint data structure */
    34 extern struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
    35 extern int sh4_breakpoint_count;
    36 extern gboolean sh4_starting;
/**
 * Cached direct pointer to the current instruction page. If AT is on, this
 * is derived from the ITLB, otherwise this will be the entire memory region.
 * This is actually a fairly useful optimization, as we can make a lot of
 * assumptions about the "current page" that we can't make in general for
 * arbitrary virtual addresses.
 */
struct sh4_icache_struct {
    sh4ptr_t page; // Host pointer to the page data (NULL if no page is cached)
    sh4vma_t page_vma; // Virtual address of the start of the page
    sh4addr_t page_ppa; // Physical address of the start of the page
    uint32_t mask;  // Page mask: AND a vma with this to get its page base address
};
/* The single global icache entry used by the IS_IN_ICACHE/GET_ICACHE_* macros below */
extern struct sh4_icache_struct sh4_icache;
    53 extern struct mem_region_fn **sh4_address_space;
    54 extern struct mem_region_fn **sh4_user_address_space;
    55 extern struct mem_region_fn **storequeue_address_space;
    56 extern struct mem_region_fn **storequeue_user_address_space;
/**
 * Test if a given address is contained in the current icache entry.
 * True when masking addr with the page mask yields the cached page_vma.
 */
#define IS_IN_ICACHE(addr) (sh4_icache.page_vma == ((addr) & sh4_icache.mask))
/**
 * Return a (host) pointer for the given vma, under the assumption that it is
 * actually contained in the current icache entry. Caller must check
 * IS_IN_ICACHE first - no validation is performed here.
 */
#define GET_ICACHE_PTR(addr) (sh4_icache.page + ((addr)-sh4_icache.page_vma))
/**
 * Return the physical (external) address for the given vma, assuming that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PHYS(addr) (sh4_icache.page_ppa + ((addr)-sh4_icache.page_vma))

/**
 * Return the virtual (vma) address for the first address past the end of the 
 * cache entry (page_vma + page size, since ~mask+1 is the page size).
 * Assumes that there is in fact a current icache entry.
 */
#define GET_ICACHE_END() (sh4_icache.page_vma + (~sh4_icache.mask) + 1)
/*
 * SH4 vm-exit codes: reasons the core stopped executing the current
 * translation block, passed to/through sh4_core_exit() and friends.
 */

/**
 * SH4 vm-exit flag - exit the current block but continue (eg exception handling)
 */
#define CORE_EXIT_CONTINUE 1

/**
 * SH4 vm-exit flag - exit the current block and halt immediately (eg fatal error)
 */
#define CORE_EXIT_HALT 2

/**
 * SH4 vm-exit flag - exit the current block and halt immediately for a system
 * breakpoint.
 */
#define CORE_EXIT_BREAKPOINT 3

/**
 * SH4 vm-exit flag - exit the current block and continue after performing a full
 * system reset (dreamcast_reset())
 */
#define CORE_EXIT_SYSRESET 4

/**
 * SH4 vm-exit flag - exit the current block and continue after the next IRQ.
 */
#define CORE_EXIT_SLEEP 5

/**
 * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
 * if address translation has changed)
 */
#define CORE_EXIT_FLUSH_ICACHE 6

/**
 * SH4 vm-exit flag - exit the current block following a taken exception. sh4r.spc
 * is fixed up by recovery rather than sh4r.pc.
 */
#define CORE_EXIT_EXCEPTION 7
   119 typedef uint32_t (*sh4_run_slice_fn)(uint32_t);
   121 /* SH4 module functions */
   122 void sh4_init( void );
   123 void sh4_reset( void );
   124 void sh4_run( void );
   125 void sh4_stop( void );
   126 uint32_t sh4_run_slice( uint32_t nanos ); // Run single timeslice using emulator
   127 uint32_t sh4_xlat_run_slice( uint32_t nanos ); // Run single timeslice using translator
   128 uint32_t sh4_sleep_run_slice( uint32_t nanos ); // Run single timeslice while the CPU is asleep
   130 /**
   131  * Immediately exit from the currently executing instruction with the given
   132  * exit code. This method does not return.
   133  */
   134 void sh4_core_exit( int exit_code );
   136 /**
   137  * Exit the current block at the end of the current instruction, flush the
   138  * translation cache (completely) and return control to sh4_xlat_run_slice.
   139  *
   140  * As a special case, if the current instruction is actually the last 
   141  * instruction in the block (ie it's in a delay slot), this function 
   142  * returns to allow normal completion of the translation block. Otherwise
   143  * this function never returns.
   144  *
   145  * Must only be invoked (indirectly) from within translated code.
   146  */
   147 void sh4_flush_icache();
   149 /* SH4 peripheral module functions */
   150 void CPG_reset( void );
   151 void DMAC_reset( void );
   152 void DMAC_run_slice( uint32_t );
   153 void DMAC_save_state( FILE * );
   154 int DMAC_load_state( FILE * );
   155 void INTC_reset( void );
   156 void INTC_save_state( FILE *f );
   157 int INTC_load_state( FILE *f );
   158 void MMU_reset( void );
   159 void MMU_save_state( FILE *f );
   160 int MMU_load_state( FILE *f );
   161 void MMU_ldtlb();
   162 void CCN_save_state( FILE *f );
   163 int CCN_load_state( FILE *f );
   164 void SCIF_reset( void );
   165 void SCIF_run_slice( uint32_t );
   166 void SCIF_save_state( FILE *f );
   167 int SCIF_load_state( FILE *f );
   168 void SCIF_update_line_speed(void);
   169 void TMU_init( void );
   170 void TMU_reset( void );
   171 void TMU_run_slice( uint32_t );
   172 void TMU_save_state( FILE * );
   173 int TMU_load_state( FILE * );
   174 void TMU_update_clocks( void );
   175 void PMM_reset( void );
   176 void PMM_write_control( int, uint32_t );
   177 void PMM_save_state( FILE * );
   178 int PMM_load_state( FILE * );
   179 uint32_t PMM_run_slice( uint32_t );
   180 uint32_t sh4_translate_run_slice(uint32_t);
   181 uint32_t sh4_emulate_run_slice(uint32_t);
   183 /* SH4 instruction support methods */
   184 mem_region_fn_t FASTCALL sh7750_decode_address( sh4addr_t address );
   185 void FASTCALL sh7750_decode_address_copy( sh4addr_t address, mem_region_fn_t result );
   186 void FASTCALL sh4_sleep( void );
   187 void FASTCALL sh4_fsca( uint32_t angle, float *fr );
   188 void FASTCALL sh4_ftrv( float *fv );
   189 uint32_t FASTCALL sh4_read_sr(void);
   190 void FASTCALL sh4_write_sr(uint32_t val);
   191 void FASTCALL sh4_write_fpscr(uint32_t val);
   192 void FASTCALL sh4_switch_fr_banks(void);
   193 void FASTCALL signsat48(void);
   194 gboolean sh4_has_page( sh4vma_t vma );
   196 /* SH4 Memory */
   197 #define MMU_VMA_ERROR 0x80000000
   198 /**
   199  * Update the sh4_icache structure to contain the specified vma. If the vma
   200  * cannot be resolved, an MMU exception is raised and the function returns
   201  * FALSE. Otherwise, returns TRUE and updates sh4_icache accordingly.
   202  * Note: If the vma resolves to a non-memory area, sh4_icache will be 
   203  * invalidated, but the function will still return TRUE.
   204  * @return FALSE if an MMU exception was raised, otherwise TRUE.
   205  */
   206 gboolean FASTCALL mmu_update_icache( sh4vma_t addr );
   208 /**
   209  * Resolve a virtual address through the TLB for a read operation, returning 
   210  * the resultant P4 or external address. If the resolution fails, the 
   211  * appropriate MMU exception is raised and the value MMU_VMA_ERROR is returned.
   212  * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
   213  * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
   214  */
   215 #ifdef HAVE_FRAME_ADDRESS
   216 sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc );
   217 sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc );
   218 #else
   219 sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr );
   220 sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr );
   221 #endif
   222 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t addr );
   224 int64_t FASTCALL sh4_read_quad( sh4addr_t addr );
   225 int32_t FASTCALL sh4_read_long( sh4addr_t addr );
   226 int32_t FASTCALL sh4_read_word( sh4addr_t addr );
   227 int32_t FASTCALL sh4_read_byte( sh4addr_t addr );
   228 void FASTCALL sh4_write_quad( sh4addr_t addr, uint64_t val );
   229 void FASTCALL sh4_write_long( sh4addr_t addr, uint32_t val );
   230 void FASTCALL sh4_write_word( sh4addr_t addr, uint32_t val );
   231 void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
   232 int32_t sh4_read_phys_word( sh4addr_t addr );
   233 void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
   234 void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc );
   236 /* SH4 Exceptions */
   237 #define EXC_POWER_RESET     0x000 /* reset vector */
   238 #define EXC_MANUAL_RESET    0x020 /* reset vector */
   239 #define EXC_TLB_MISS_READ   0x040 /* TLB vector */
   240 #define EXC_TLB_MISS_WRITE  0x060 /* TLB vector */
   241 #define EXC_INIT_PAGE_WRITE 0x080
   242 #define EXC_TLB_PROT_READ   0x0A0
   243 #define EXC_TLB_PROT_WRITE  0x0C0
   244 #define EXC_DATA_ADDR_READ  0x0E0
   245 #define EXC_DATA_ADDR_WRITE 0x100
   246 #define EXC_TLB_MULTI_HIT   0x140
   247 #define EXC_SLOT_ILLEGAL    0x1A0
   248 #define EXC_ILLEGAL         0x180
   249 #define EXC_TRAP            0x160
   250 #define EXC_FPU_DISABLED    0x800
   251 #define EXC_SLOT_FPU_DISABLED 0x820
   253 #define EXV_EXCEPTION    0x100  /* General exception vector */
   254 #define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
   255 #define EXV_INTERRUPT    0x600  /* External interrupt vector */
   257 gboolean FASTCALL sh4_raise_exception( int );
   258 gboolean FASTCALL sh4_raise_reset( int );
   259 gboolean FASTCALL sh4_raise_trap( int );
   260 gboolean FASTCALL sh4_raise_slot_exception( int, int );
   261 gboolean FASTCALL sh4_raise_tlb_exception( int );
   262 void FASTCALL sh4_accept_interrupt( void );
/* Status Register (SR) bits */
#define SR_MD    0x40000000 /* Processor mode ( User=0, Privileged=1 ) */
#define SR_RB    0x20000000 /* Register bank (privileged mode only) */
#define SR_BL    0x10000000 /* Exception/interrupt block (1 = masked) */
#define SR_FD    0x00008000 /* FPU disable */
#define SR_M     0x00000200 /* Divide-step M flag (DIV0S/DIV0U/DIV1) */
#define SR_Q     0x00000100 /* Divide-step Q flag (DIV0S/DIV0U/DIV1) */
#define SR_IMASK 0x000000F0 /* Interrupt mask level */
#define SR_S     0x00000002 /* Saturation operation for MAC instructions */
#define SR_T     0x00000001 /* True/false or carry/borrow */
#define SR_MASK  0x700083F3 /* Union of all SR bits defined above (valid bits of SR) */
#define SR_MQSTMASK 0xFFFFFCFC /* Mask to clear the flags we're keeping separately */
#define SR_MDRB  0x60000000 /* MD+RB mask for convenience */

#define IS_SH4_PRIVMODE() (sh4r.sr&SR_MD) /* Nonzero when CPU is in privileged mode */
#define SH4_INTMASK() ((sh4r.sr&SR_IMASK)>>4) /* Current interrupt mask level (0-15) */
/* True when a queued event is due in this timeslice and we're not in a delay slot */
#define SH4_EVENT_PENDING() (sh4r.event_pending <= sh4r.slice_cycle && !sh4r.in_delay_slot)
/* Floating-point Status/Control Register (FPSCR) bits */
#define FPSCR_FR     0x00200000 /* FPU register bank */
#define FPSCR_SZ     0x00100000 /* FPU transfer size (0=32 bits, 1=64 bits) */
#define FPSCR_PR     0x00080000 /* Precision (0=32 bits, 1=64 bits) */
#define FPSCR_DN     0x00040000 /* Denormalization mode (1 = treat as 0) */
#define FPSCR_CAUSE  0x0003F000 /* FPU exception cause field */
#define FPSCR_ENABLE 0x00000F80 /* FPU exception enable field */
#define FPSCR_FLAG   0x0000007C /* FPU exception flag field */
#define FPSCR_RM     0x00000003 /* Rounding mode (0=nearest, 1=to zero) */
#define FPSCR_MASK   0x003FFFFF /* Union of all FPSCR bits defined above */

#define IS_FPU_DOUBLEPREC() (sh4r.fpscr&FPSCR_PR) /* Nonzero in double-precision mode */
#define IS_FPU_DOUBLESIZE() (sh4r.fpscr&FPSCR_SZ) /* Nonzero for 64-bit FP transfers */
#define IS_FPU_ENABLED() ((sh4r.sr&SR_FD)==0) /* FPU usable only when SR.FD is clear */
/*
 * Accessors for the FP register file (sh4r.fr[bank][idx]).
 * NOTE(review): the ^1 on single-precision indices presumably swaps
 * odd/even floats within each 64-bit pair for host byte order - TODO
 * confirm against the sh4r.fr storage layout.
 */
#define FR(x) sh4r.fr[0][(x)^1]                   /* Single FRn in the current bank */
#define DRF(x) *((double *)&sh4r.fr[0][(x)<<1])   /* Double at pair index x, bank 0 */
#define XF(x) sh4r.fr[1][(x)^1]                   /* Single XFn in the alternate bank */
#define XDR(x) *((double *)&sh4r.fr[1][(x)<<1])   /* Double at pair index x, bank 1 */
#define DRb(x,b) *((double *)&sh4r.fr[b][(x)<<1]) /* Double at pair index x in bank b */
#define DR(x) *((double *)&sh4r.fr[x&1][x&0x0E])  /* DRn: low bit selects bank, x&0x0E is the even pair offset */
#define FPULf    (sh4r.fpul.f)  /* FPUL viewed as a float */
#define FPULi    (sh4r.fpul.i)  /* FPUL viewed as an integer */
   305 /**************** SH4 internal memory regions *****************/
   306 extern struct mem_region_fn p4_region_storequeue; 
   307 extern struct mem_region_fn p4_region_itlb_addr;
   308 extern struct mem_region_fn p4_region_itlb_data;
   309 extern struct mem_region_fn p4_region_utlb_addr;
   310 extern struct mem_region_fn p4_region_utlb_data;
   311 extern struct mem_region_fn p4_region_icache_addr;
   312 extern struct mem_region_fn p4_region_icache_data;
   313 extern struct mem_region_fn p4_region_ocache_addr;
   314 extern struct mem_region_fn p4_region_ocache_data;
   315 extern struct mem_region_fn mem_region_address_error;
   316 extern struct mem_region_fn mem_region_tlb_miss;
   317 extern struct mem_region_fn mem_region_tlb_multihit;
   318 extern struct mem_region_fn mem_region_user_protected;
   321 #ifdef __cplusplus
   322 }
   323 #endif
   325 #endif /* !lxdream_sh4core_H */
.