Search
lxdream.org :: lxdream/src/sh4/sh4core.h
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4core.h
changeset 740:dd11269ee48b
prev736:a02d1475ccfd
next759:f16975739abc
author nkeynes
date Wed Jul 16 10:40:10 2008 +0000 (14 years ago)
permissions -rw-r--r--
last change Rationalize the two SH4 run slice impls into sh4.c, and tidy up the vm exits.
Fixes broken soft-reset with emulator core
Fixes broken build without translator
file annotate diff log raw
nkeynes@10
     1
/**
nkeynes@586
     2
 * $Id$
nkeynes@10
     3
 * 
nkeynes@54
     4
 * This file defines the internal functions exported/used by the SH4 core, 
nkeynes@54
     5
 * except for disassembly functions defined in sh4dasm.h
nkeynes@10
     6
 *
nkeynes@10
     7
 * Copyright (c) 2005 Nathan Keynes.
nkeynes@10
     8
 *
nkeynes@10
     9
 * This program is free software; you can redistribute it and/or modify
nkeynes@10
    10
 * it under the terms of the GNU General Public License as published by
nkeynes@10
    11
 * the Free Software Foundation; either version 2 of the License, or
nkeynes@10
    12
 * (at your option) any later version.
nkeynes@10
    13
 *
nkeynes@10
    14
 * This program is distributed in the hope that it will be useful,
nkeynes@10
    15
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
nkeynes@10
    16
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
nkeynes@10
    17
 * GNU General Public License for more details.
nkeynes@1
    18
 */
nkeynes@30
    19
nkeynes@736
    20
#ifndef lxdream_sh4core_H
nkeynes@736
    21
#define lxdream_sh4core_H 1
nkeynes@1
    22
nkeynes@27
    23
#include <glib/gtypes.h>
nkeynes@1
    24
#include <stdint.h>
nkeynes@23
    25
#include <stdio.h>
nkeynes@378
    26
#include "mem.h"
nkeynes@586
    27
#include "sh4/sh4.h"
nkeynes@1
    28
nkeynes@1
    29
#ifdef __cplusplus
nkeynes@1
    30
extern "C" {
nkeynes@1
    31
#endif
nkeynes@1
    32
nkeynes@586
    33
/* Breakpoint data structure */
extern struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS]; // Active breakpoint table
extern int sh4_breakpoint_count;  // Number of valid entries in sh4_breakpoints
extern sh4ptr_t sh4_main_ram;     // Direct host pointer to main system RAM
extern gboolean sh4_starting;     // Start-up flag -- NOTE(review): confirm exact lifecycle against sh4.c
nkeynes@27
    38
nkeynes@27
    39
/**
nkeynes@586
    40
 * Cached direct pointer to the current instruction page. If AT is on, this
nkeynes@586
    41
 * is derived from the ITLB, otherwise this will be the entire memory region.
nkeynes@586
    42
 * This is actually a fairly useful optimization, as we can make a lot of
nkeynes@586
    43
 * assumptions about the "current page" that we can't make in general for
nkeynes@586
    44
 * arbitrary virtual addresses.
nkeynes@27
    45
 */
nkeynes@586
    46
/* Cached descriptor for the current instruction page (see comment above). */
struct sh4_icache_struct {
    sh4ptr_t page; // Page pointer (host pointer to the start of the page; NULL if no page)
    sh4vma_t page_vma; // virtual address of the page.
    sh4addr_t page_ppa; // physical address of the page
    uint32_t mask;  // page mask: an address is in this entry iff (addr & mask) == page_vma (see IS_IN_ICACHE)
};
extern struct sh4_icache_struct sh4_icache;
nkeynes@586
    53
nkeynes@27
    54
/**
 * Test if a given address is contained in the current icache entry
 */
#define IS_IN_ICACHE(addr) (sh4_icache.page_vma == ((addr) & sh4_icache.mask))
/**
 * Return a pointer for the given vma, under the assumption that it is
 * actually contained in the current icache entry. Caller must check
 * IS_IN_ICACHE(addr) first -- no validation is performed here.
 */
#define GET_ICACHE_PTR(addr) (sh4_icache.page + ((addr)-sh4_icache.page_vma))
/**
 * Return the physical (external) address for the given vma, assuming that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PHYS(addr) (sh4_icache.page_ppa + ((addr)-sh4_icache.page_vma))

/**
 * Return the virtual (vma) address for the first address past the end of the 
 * cache entry (page_vma + page size, where page size is ~mask + 1).
 * Assumes that there is in fact a current icache entry.
 */
#define GET_ICACHE_END() (sh4_icache.page_vma + (~sh4_icache.mask) + 1)
nkeynes@589
    74
nkeynes@740
    75
nkeynes@740
    76
/**
 * SH4 vm-exit flag - exit the current block but continue (eg exception handling)
 */
#define CORE_EXIT_CONTINUE 1

/**
 * SH4 vm-exit flag - exit the current block and halt immediately (eg fatal error)
 */
#define CORE_EXIT_HALT 2

/**
 * SH4 vm-exit flag - exit the current block and halt immediately for a system
 * breakpoint.
 */
#define CORE_EXIT_BREAKPOINT 3

/**
 * SH4 vm-exit flag - exit the current block and continue after performing a full
 * system reset (dreamcast_reset())
 */
#define CORE_EXIT_SYSRESET 4

/**
 * SH4 vm-exit flag - exit the current block and continue after the next IRQ.
 */
#define CORE_EXIT_SLEEP 5

/**
 * SH4 vm-exit flag - exit the current block  and flush all instruction caches (ie
 * if address translation has changed)
 */
#define CORE_EXIT_FLUSH_ICACHE 6

/* Signature shared by the run-slice implementations declared below: takes the
 * slice length in nanoseconds -- NOTE(review): return value presumably the
 * nanoseconds actually executed; confirm against sh4.c. */
typedef uint32_t (*sh4_run_slice_fn)(uint32_t);
nkeynes@740
   110
nkeynes@586
   111
/* SH4 module functions */
void sh4_init( void );
void sh4_reset( void );
void sh4_run( void );
void sh4_stop( void );
uint32_t sh4_run_slice( uint32_t nanos ); // Run single timeslice using emulator
uint32_t sh4_xlat_run_slice( uint32_t nanos ); // Run single timeslice using translator
uint32_t sh4_sleep_run_slice( uint32_t nanos ); // Run single timeslice while the CPU is asleep

/**
 * Immediately exit from the currently executing instruction with the given
 * exit code (one of the CORE_EXIT_* flags). This method does not return.
 */
void sh4_core_exit( int exit_code );

/**
 * Exit the current block at the end of the current instruction, flush the
 * translation cache (completely) and return control to sh4_xlat_run_slice.
 *
 * As a special case, if the current instruction is actually the last 
 * instruction in the block (ie it's in a delay slot), this function 
 * returns to allow normal completion of the translation block. Otherwise
 * this function never returns.
 *
 * Must only be invoked (indirectly) from within translated code.
 */
void sh4_flush_icache( void ); /* was '()': empty parens declare an unspecified
                                  parameter list in C; (void) gives a full prototype */
nkeynes@740
   138
nkeynes@586
   139
/* SH4 peripheral module functions: reset / run-slice / save-state hooks for
 * each on-chip peripheral (clock generator, DMA, interrupt, MMU, serial,
 * timer units). */
void CPG_reset( void );
void DMAC_reset( void );
void DMAC_run_slice( uint32_t );
void DMAC_save_state( FILE * );
int DMAC_load_state( FILE * );
void INTC_reset( void );
void INTC_save_state( FILE *f );
int INTC_load_state( FILE *f );
void MMU_init( void );
void MMU_reset( void );
void MMU_save_state( FILE *f );
int MMU_load_state( FILE *f );
void MMU_ldtlb( void ); /* was '()': empty parens declare an unspecified
                           parameter list in C; (void) gives a full prototype */
void SCIF_reset( void );
void SCIF_run_slice( uint32_t );
void SCIF_save_state( FILE *f );
int SCIF_load_state( FILE *f );
void SCIF_update_line_speed(void);
void TMU_init( void );
void TMU_reset( void );
void TMU_run_slice( uint32_t );
void TMU_save_state( FILE * );
int TMU_load_state( FILE * );
void TMU_update_clocks( void );
nkeynes@586
   164
nkeynes@586
   165
/* SH4 instruction support methods */
void sh4_sleep( void );                     // Implements the SLEEP instruction
void sh4_fsca( uint32_t angle, float *fr ); // FSCA sine/cosine approximation; results written through fr
void sh4_ftrv( float *fv );                 // FTRV vector transform; operates in-place on fv
uint32_t sh4_read_sr(void);                 // Read full SR (M/Q/S/T are cached separately; see SR_MQSTMASK)
void sh4_write_sr(uint32_t val);            // Write SR, updating any cached/banked state as required
void sh4_write_fpscr(uint32_t val);         // Write FPSCR, updating FP bank state as required
void sh4_switch_fr_banks(void);             // Swap the two floating-point register banks (see FPSCR_FR)
void signsat48(void);                       // 48-bit sign saturation for MAC results (see SR_S, SIGNEXT48)
gboolean sh4_has_page( sh4vma_t vma );      // TRUE if vma is backed by a page -- NOTE(review): confirm exact semantics
nkeynes@378
   175
nkeynes@586
   176
/* SH4 Memory */
#define MMU_VMA_ERROR 0x80000000 /* Sentinel returned by mmu_vma_to_phys_* on failure */
/**
 * Update the sh4_icache structure to contain the specified vma. If the vma
 * cannot be resolved, an MMU exception is raised and the function returns
 * FALSE. Otherwise, returns TRUE and updates sh4_icache accordingly.
 * Note: If the vma resolves to a non-memory area, sh4_icache will be 
 * invalidated, but the function will still return TRUE.
 * @return FALSE if an MMU exception was raised, otherwise TRUE.
 */
gboolean mmu_update_icache( sh4vma_t addr );

/**
 * Resolve a virtual address through the TLB for a read operation, returning 
 * the resultant P4 or external address. If the resolution fails, the 
 * appropriate MMU exception is raised and the value MMU_VMA_ERROR is returned.
 * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
 * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
 */
sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr );
sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr );  /* As above, for a write access */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t addr ); /* As above, for disassembly -- NOTE(review): presumably raises no exceptions; confirm */

/* Memory accessors. Reads return a widened signed value -- presumably
 * sign-extended for word/byte reads; NOTE(review): confirm against the
 * implementations. */
int64_t sh4_read_quad( sh4addr_t addr );
int32_t sh4_read_long( sh4addr_t addr );
int32_t sh4_read_word( sh4addr_t addr );
int32_t sh4_read_byte( sh4addr_t addr );
void sh4_write_quad( sh4addr_t addr, uint64_t val );
void sh4_write_long( sh4addr_t addr, uint32_t val );
void sh4_write_word( sh4addr_t addr, uint32_t val );
void sh4_write_byte( sh4addr_t addr, uint32_t val );
int32_t sh4_read_phys_word( sh4addr_t addr );     /* Word read by physical address -- NOTE(review): presumably bypasses the TLB; confirm */
gboolean sh4_flush_store_queue( sh4addr_t addr ); /* Flush the store queue to memory (see SH4_WRITE_STORE_QUEUE) */
nkeynes@10
   209
nkeynes@586
   210
/* SH4 Exceptions: exception event codes (EXC_*) and vector offsets (EXV_*).
 * Note: the EXC_* defines are not in strict numeric order; kept as-is. */
#define EXC_POWER_RESET     0x000 /* reset vector */
#define EXC_MANUAL_RESET    0x020 /* reset vector */
#define EXC_TLB_MISS_READ   0x040 /* TLB vector */
#define EXC_TLB_MISS_WRITE  0x060 /* TLB vector */
#define EXC_INIT_PAGE_WRITE 0x080
#define EXC_TLB_PROT_READ   0x0A0
#define EXC_TLB_PROT_WRITE  0x0C0
#define EXC_DATA_ADDR_READ  0x0E0
#define EXC_DATA_ADDR_WRITE 0x100
#define EXC_TLB_MULTI_HIT   0x140
#define EXC_SLOT_ILLEGAL    0x1A0
#define EXC_ILLEGAL         0x180
#define EXC_TRAP            0x160
#define EXC_FPU_DISABLED    0x800
#define EXC_SLOT_FPU_DISABLED 0x820

#define EXV_EXCEPTION    0x100  /* General exception vector */
#define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
#define EXV_INTERRUPT    0x600  /* External interrupt vector */

gboolean sh4_raise_exception( int );           /* Raise a general exception with the given EXC_* code */
gboolean sh4_raise_reset( int );               /* Raise a reset exception */
gboolean sh4_raise_trap( int );                /* Raise a trap (TRAPA) exception */
gboolean sh4_raise_slot_exception( int, int ); /* Raise one of two codes -- NOTE(review): presumably the slot variant applies in a delay slot; confirm */
gboolean sh4_raise_tlb_exception( int );       /* Raise a TLB exception (EXV_TLBMISS vector) */
void sh4_accept_interrupt( void );             /* Accept the pending external interrupt */
nkeynes@1
   237
nkeynes@1
   238
/* Sign- and zero-extension helpers for decoding instruction immediates.
 *
 * The former formulations ((((int32_t)(n))<<28)>>28 etc.) left-shifted
 * signed values into the sign bit (undefined behavior in C99) and relied on
 * implementation-defined arithmetic right shift of negative values. The
 * mask/xor/subtract form below is fully portable and produces identical
 * results for all inputs. */
#define SIGNEXT4(n)  ((int32_t)((((uint32_t)(n)) & 0xFU) ^ 0x8U) - 0x8)
#define SIGNEXT8(n)  ((int32_t)((int8_t)(n)))
#define SIGNEXT12(n) ((int32_t)((((uint32_t)(n)) & 0xFFFU) ^ 0x800U) - 0x800)
#define SIGNEXT16(n) ((int32_t)((int16_t)(n)))
#define SIGNEXT32(n) ((int64_t)((int32_t)(n)))
#define SIGNEXT48(n) ((int64_t)((((uint64_t)(n)) & UINT64_C(0xFFFFFFFFFFFF)) ^ UINT64_C(0x800000000000)) - INT64_C(0x800000000000))
/* Zero-extend a 32-bit value to 64 bits */
#define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
nkeynes@1
   245
nkeynes@1
   246
/* Status Register (SR) bits */
#define SR_MD    0x40000000 /* Processor mode ( User=0, Privileged=1 ) */ 
#define SR_RB    0x20000000 /* Register bank (privileged mode only) */
#define SR_BL    0x10000000 /* Exception/interrupt block (1 = masked) */
#define SR_FD    0x00008000 /* FPU disable */
#define SR_M     0x00000200 /* M flag (division steps) */
#define SR_Q     0x00000100 /* Q flag (division steps) */
#define SR_IMASK 0x000000F0 /* Interrupt mask level */
#define SR_S     0x00000002 /* Saturation operation for MAC instructions */
#define SR_T     0x00000001 /* True/false or carry/borrow */
#define SR_MASK  0x700083F3 /* All architecturally-defined SR bits */
#define SR_MQSTMASK 0xFFFFFCFC /* Mask to clear the flags we're keeping separately */
#define SR_MDRB  0x60000000 /* MD+RB mask for convenience */

#define IS_SH4_PRIVMODE() (sh4r.sr&SR_MD)          /* Nonzero when in privileged mode */
#define SH4_INTMASK() ((sh4r.sr&SR_IMASK)>>4)      /* Current interrupt mask level, 0-15 */
/* Event is pending once slice_cycle reaches event_pending, unless we're in a
 * delay slot (events are deferred past delay slots). */
#define SH4_EVENT_PENDING() (sh4r.event_pending <= sh4r.slice_cycle && !sh4r.in_delay_slot)
nkeynes@1
   263
nkeynes@1
   264
/* Floating-point Status/Control Register (FPSCR) bits */
#define FPSCR_FR     0x00200000 /* FPU register bank */
#define FPSCR_SZ     0x00100000 /* FPU transfer size (0=32 bits, 1=64 bits) */
#define FPSCR_PR     0x00080000 /* Precision (0=32 bits, 1=64 bits) */
#define FPSCR_DN     0x00040000 /* Denormalization mode (1 = treat as 0) */
#define FPSCR_CAUSE  0x0003F000 /* FPU exception cause field */
#define FPSCR_ENABLE 0x00000F80 /* FPU exception enable field */
#define FPSCR_FLAG   0x0000007C /* FPU exception flag field */
#define FPSCR_RM     0x00000003 /* Rounding mode (0=nearest, 1=to zero) */

#define IS_FPU_DOUBLEPREC() (sh4r.fpscr&FPSCR_PR)  /* Nonzero: double-precision mode */
#define IS_FPU_DOUBLESIZE() (sh4r.fpscr&FPSCR_SZ)  /* Nonzero: 64-bit transfer size */
#define IS_FPU_ENABLED() ((sh4r.sr&SR_FD)==0)      /* FPU usable (SR.FD clear) */
nkeynes@1
   276
nkeynes@669
   277
/* Floating-point register file accessors.
 *
 * sh4r.fr is a two-bank array of single-precision registers: bank 0 is the
 * current FR bank, bank 1 the alternate (XF) bank. The ^1 on single-precision
 * indices and the double-typed access below presumably compensate for host
 * word ordering within a double pair -- NOTE(review): confirm against the
 * declaration of sh4r.fr in sh4.h.
 *
 * NOTE(review): the (double *) casts type-pun the float array, which relies
 * on the compiler tolerating strict-aliasing violations.
 *
 * Fix: macro parameters are now fully parenthesized (DR's 'x' and DRb's 'b'
 * previously were not, a precedence hazard for expression arguments). */
#define FR(x) sh4r.fr[0][(x)^1]                      /* Single-precision FRn, current bank */
#define DRF(x) *((double *)&sh4r.fr[0][(x)<<1])      /* Double-precision pair x, bank 0 */
#define XF(x) sh4r.fr[1][(x)^1]                      /* Single-precision XFn, alternate bank */
#define XDR(x) *((double *)&sh4r.fr[1][(x)<<1])      /* Double-precision pair x, bank 1 */
#define DRb(x,b) *((double *)&sh4r.fr[(b)][(x)<<1])  /* Double pair x in explicit bank b */
#define DR(x) *((double *)&sh4r.fr[(x)&1][(x)&0x0E]) /* Encoded DRn: bit 0 selects bank, bits 1-3 the pair offset */
#define FPULf    (sh4r.fpul.f)                       /* FPUL viewed as float */
#define FPULi    (sh4r.fpul.i)                       /* FPUL viewed as integer */
nkeynes@359
   285
nkeynes@2
   286
#define SH4_WRITE_STORE_QUEUE(addr,val) sh4r.store_queue[(addr>>2)&0xF] = val;
nkeynes@1
   287
nkeynes@1
   288
#ifdef __cplusplus
nkeynes@1
   289
}
nkeynes@1
   290
#endif
nkeynes@359
   291
nkeynes@736
   292
#endif /* !lxdream_sh4core_H */
nkeynes@736
   293
.