Search
lxdream.org :: lxdream :: r953:f4a156508ad1
lxdream 0.9.1
released Jun 29
Download Now
changeset: 953:f4a156508ad1
parent:    927:17b6b9e245d8
child:     954:59e17ce91c55
author:    nkeynes
date:      Tue Jan 13 11:56:28 2009 +0000 (11 years ago)
Merge lxdream-mem branch back to trunk
Makefile.in
src/Makefile.am
src/Makefile.in
src/aica/aica.c
src/aica/aica.h
src/aica/armcore.h
src/aica/armmem.c
src/aica/audio.c
src/asic.c
src/dreamcast.c
src/dreamcast.h
src/lxdream.h
src/mem.c
src/mem.h
src/mmio.h
src/pvr2/glrender.c
src/pvr2/pvr2.c
src/pvr2/pvr2.h
src/pvr2/pvr2mem.c
src/pvr2/pvr2mmio.h
src/pvr2/rendsave.c
src/pvr2/rendsort.c
src/pvr2/scene.c
src/pvr2/tacore.c
src/sdram.c
src/sh4/cache.c
src/sh4/dmac.c
src/sh4/ia32abi.h
src/sh4/ia64abi.h
src/sh4/intc.c
src/sh4/mmu.c
src/sh4/mmu.h
src/sh4/mmux86.c
src/sh4/pmm.c
src/sh4/scif.c
src/sh4/sh4.c
src/sh4/sh4.h
src/sh4/sh4core.h
src/sh4/sh4core.in
src/sh4/sh4dasm.in
src/sh4/sh4mem.c
src/sh4/sh4mmio.c
src/sh4/sh4mmio.h
src/sh4/sh4stat.in
src/sh4/sh4trans.c
src/sh4/sh4trans.h
src/sh4/sh4x86.in
src/sh4/timer.c
src/sh4/x86op.h
src/sh4/xltcache.c
src/sh4/xltcache.h
src/test/testsh4x86.c
src/tools/actparse.c
src/tools/gendec.c
src/tools/gendec.h
src/x86dasm/x86dasm.c
1.1 --- a/Makefile.in Mon Dec 15 10:44:56 2008 +0000
1.2 +++ b/Makefile.in Tue Jan 13 11:56:28 2009 +0000
1.3 @@ -44,7 +44,8 @@
1.4 config.sub depcomp install-sh missing mkinstalldirs
1.5 subdir = .
1.6 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
1.7 -am__aclocal_m4_deps = $(top_srcdir)/configure.in
1.8 +am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
1.9 + $(top_srcdir)/configure.in
1.10 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
1.11 $(ACLOCAL_M4)
1.12 am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
2.1 --- a/src/Makefile.am Mon Dec 15 10:44:56 2008 +0000
2.2 +++ b/src/Makefile.am Tue Jan 13 11:56:28 2009 +0000
2.3 @@ -34,7 +34,7 @@
2.4
2.5 lxdream_SOURCES = \
2.6 main.c version.c config.c config.h lxdream.h dream.h gui.h cpu.h hook.h \
2.7 - gettext.h mem.c mem.h mmio.h paths.c watch.c \
2.8 + gettext.h mem.c mem.h sdram.c mmio.h paths.c watch.c \
2.9 asic.c asic.h clock.h serial.h \
2.10 syscall.c syscall.h bios.c dcload.c \
2.11 gdrom/ide.c gdrom/ide.h gdrom/packet.h gdrom/gdimage.c \
2.12 @@ -46,6 +46,7 @@
2.13 sh4/mmu.c sh4/sh4core.c sh4/sh4core.h sh4/sh4dasm.c sh4/sh4dasm.h \
2.14 sh4/sh4mmio.c sh4/sh4mmio.h sh4/scif.c sh4/sh4stat.c sh4/sh4stat.h \
2.15 sh4/xltcache.c sh4/xltcache.h sh4/sh4.h sh4/dmac.h sh4/pmm.c \
2.16 + sh4/cache.c sh4/mmu.h \
2.17 aica/armcore.c aica/armcore.h aica/armdasm.c aica/armdasm.h aica/armmem.c \
2.18 aica/aica.c aica/aica.h aica/audio.c aica/audio.h \
2.19 pvr2/pvr2.c pvr2/pvr2.h pvr2/pvr2mem.c pvr2/pvr2mmio.h \
2.20 @@ -65,7 +66,7 @@
2.21 if BUILD_SH4X86
2.22 lxdream_SOURCES += sh4/sh4x86.c sh4/x86op.h \
2.23 sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
2.24 - sh4/sh4trans.c sh4/sh4trans.h \
2.25 + sh4/sh4trans.c sh4/sh4trans.h sh4/mmux86.c \
2.26 x86dasm/x86dasm.c x86dasm/x86dasm.h \
2.27 x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c \
2.28 x86dasm/ansidecl.h x86dasm/bfd.h x86dasm/dis-asm.h \
2.29 @@ -75,8 +76,8 @@
2.30 test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
2.31 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
2.32 x86dasm/dis-buf.c \
2.33 - sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
2.34 - sh4/xltcache.h mem.c util.c sh4/mmu.c
2.35 + sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
2.36 + sh4/xltcache.h mem.c util.c
2.37
2.38 check_PROGRAMS += test/testsh4x86
2.39 endif
3.1 --- a/src/Makefile.in Mon Dec 15 10:44:56 2008 +0000
3.2 +++ b/src/Makefile.in Tue Jan 13 11:56:28 2009 +0000
3.3 @@ -41,7 +41,7 @@
3.4 check_PROGRAMS = test/testxlt$(EXEEXT) $(am__EXEEXT_1)
3.5 @BUILD_SH4X86_TRUE@am__append_1 = sh4/sh4x86.c sh4/x86op.h \
3.6 @BUILD_SH4X86_TRUE@ sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
3.7 -@BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4trans.h \
3.8 +@BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4trans.h sh4/mmux86.c \
3.9 @BUILD_SH4X86_TRUE@ x86dasm/x86dasm.c x86dasm/x86dasm.h \
3.10 @BUILD_SH4X86_TRUE@ x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c \
3.11 @BUILD_SH4X86_TRUE@ x86dasm/ansidecl.h x86dasm/bfd.h x86dasm/dis-asm.h \
3.12 @@ -73,7 +73,8 @@
3.13 subdir = src
3.14 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
3.15 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
3.16 -am__aclocal_m4_deps = $(top_srcdir)/configure.in
3.17 +am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
3.18 + $(top_srcdir)/configure.in
3.19 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
3.20 $(ACLOCAL_M4)
3.21 mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
3.22 @@ -93,7 +94,7 @@
3.23 genglsl_DEPENDENCIES = $(am__DEPENDENCIES_1)
3.24 am__lxdream_SOURCES_DIST = main.c version.c config.c config.h \
3.25 lxdream.h dream.h gui.h cpu.h hook.h gettext.h mem.c mem.h \
3.26 - mmio.h paths.c watch.c asic.c asic.h clock.h serial.h \
3.27 + sdram.c mmio.h paths.c watch.c asic.c asic.h clock.h serial.h \
3.28 syscall.c syscall.h bios.c dcload.c gdrom/ide.c gdrom/ide.h \
3.29 gdrom/packet.h gdrom/gdimage.c gdrom/gdrom.c gdrom/gdrom.h \
3.30 gdrom/nrg.c gdrom/cdi.c gdrom/gdi.c gdrom/edc_ecc.c \
3.31 @@ -104,22 +105,23 @@
3.32 sh4/dmac.c sh4/mmu.c sh4/sh4core.c sh4/sh4core.h sh4/sh4dasm.c \
3.33 sh4/sh4dasm.h sh4/sh4mmio.c sh4/sh4mmio.h sh4/scif.c \
3.34 sh4/sh4stat.c sh4/sh4stat.h sh4/xltcache.c sh4/xltcache.h \
3.35 - sh4/sh4.h sh4/dmac.h sh4/pmm.c aica/armcore.c aica/armcore.h \
3.36 - aica/armdasm.c aica/armdasm.h aica/armmem.c aica/aica.c \
3.37 - aica/aica.h aica/audio.c aica/audio.h pvr2/pvr2.c pvr2/pvr2.h \
3.38 - pvr2/pvr2mem.c pvr2/pvr2mmio.h pvr2/tacore.c pvr2/rendsort.c \
3.39 - pvr2/texcache.c pvr2/yuv.c pvr2/rendsave.c pvr2/scene.c \
3.40 - pvr2/scene.h pvr2/gl_sl.c pvr2/gl_slsrc.c pvr2/glutil.c \
3.41 - pvr2/glutil.h pvr2/glrender.c pvr2/vertex.glsl \
3.42 - pvr2/fragment.glsl maple/maple.c maple/maple.h \
3.43 - maple/controller.c maple/kbd.c maple/mouse.c maple/lightgun.c \
3.44 - loader.c loader.h elf.h bootstrap.c bootstrap.h util.c \
3.45 - gdlist.c gdlist.h display.c display.h dckeysyms.h \
3.46 - drivers/audio_null.c drivers/video_null.c drivers/video_gl.c \
3.47 - drivers/video_gl.h drivers/gl_fbo.c sh4/sh4.def sh4/sh4core.in \
3.48 - sh4/sh4x86.in sh4/sh4dasm.in sh4/sh4stat.in sh4/sh4x86.c \
3.49 - sh4/x86op.h sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
3.50 - sh4/sh4trans.c sh4/sh4trans.h x86dasm/x86dasm.c \
3.51 + sh4/sh4.h sh4/dmac.h sh4/pmm.c sh4/cache.c sh4/mmu.h \
3.52 + aica/armcore.c aica/armcore.h aica/armdasm.c aica/armdasm.h \
3.53 + aica/armmem.c aica/aica.c aica/aica.h aica/audio.c \
3.54 + aica/audio.h pvr2/pvr2.c pvr2/pvr2.h pvr2/pvr2mem.c \
3.55 + pvr2/pvr2mmio.h pvr2/tacore.c pvr2/rendsort.c pvr2/texcache.c \
3.56 + pvr2/yuv.c pvr2/rendsave.c pvr2/scene.c pvr2/scene.h \
3.57 + pvr2/gl_sl.c pvr2/gl_slsrc.c pvr2/glutil.c pvr2/glutil.h \
3.58 + pvr2/glrender.c pvr2/vertex.glsl pvr2/fragment.glsl \
3.59 + maple/maple.c maple/maple.h maple/controller.c maple/kbd.c \
3.60 + maple/mouse.c maple/lightgun.c loader.c loader.h elf.h \
3.61 + bootstrap.c bootstrap.h util.c gdlist.c gdlist.h display.c \
3.62 + display.h dckeysyms.h drivers/audio_null.c \
3.63 + drivers/video_null.c drivers/video_gl.c drivers/video_gl.h \
3.64 + drivers/gl_fbo.c sh4/sh4.def sh4/sh4core.in sh4/sh4x86.in \
3.65 + sh4/sh4dasm.in sh4/sh4stat.in sh4/sh4x86.c sh4/x86op.h \
3.66 + sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h sh4/sh4trans.c \
3.67 + sh4/sh4trans.h sh4/mmux86.c x86dasm/x86dasm.c \
3.68 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
3.69 x86dasm/dis-buf.c x86dasm/ansidecl.h x86dasm/bfd.h \
3.70 x86dasm/dis-asm.h x86dasm/symcat.h x86dasm/sysdep.h \
3.71 @@ -137,8 +139,9 @@
3.72 drivers/osx_iokit.h drivers/cd_none.c drivers/joy_linux.c \
3.73 drivers/joy_linux.h
3.74 @BUILD_SH4X86_TRUE@am__objects_1 = sh4x86.$(OBJEXT) sh4trans.$(OBJEXT) \
3.75 -@BUILD_SH4X86_TRUE@ x86dasm.$(OBJEXT) i386-dis.$(OBJEXT) \
3.76 -@BUILD_SH4X86_TRUE@ dis-init.$(OBJEXT) dis-buf.$(OBJEXT)
3.77 +@BUILD_SH4X86_TRUE@ mmux86.$(OBJEXT) x86dasm.$(OBJEXT) \
3.78 +@BUILD_SH4X86_TRUE@ i386-dis.$(OBJEXT) dis-init.$(OBJEXT) \
3.79 +@BUILD_SH4X86_TRUE@ dis-buf.$(OBJEXT)
3.80 @GUI_GTK_TRUE@am__objects_2 = gtkui.$(OBJEXT) gtk_win.$(OBJEXT) \
3.81 @GUI_GTK_TRUE@ gtkcb.$(OBJEXT) gtk_mmio.$(OBJEXT) \
3.82 @GUI_GTK_TRUE@ gtk_debug.$(OBJEXT) gtk_dump.$(OBJEXT) \
3.83 @@ -160,42 +163,42 @@
3.84 @CDROM_NONE_TRUE@am__objects_13 = cd_none.$(OBJEXT)
3.85 @JOY_LINUX_TRUE@am__objects_14 = joy_linux.$(OBJEXT)
3.86 am_lxdream_OBJECTS = main.$(OBJEXT) version.$(OBJEXT) config.$(OBJEXT) \
3.87 - mem.$(OBJEXT) paths.$(OBJEXT) watch.$(OBJEXT) asic.$(OBJEXT) \
3.88 - syscall.$(OBJEXT) bios.$(OBJEXT) dcload.$(OBJEXT) \
3.89 - ide.$(OBJEXT) gdimage.$(OBJEXT) gdrom.$(OBJEXT) nrg.$(OBJEXT) \
3.90 - cdi.$(OBJEXT) gdi.$(OBJEXT) edc_ecc.$(OBJEXT) mmc.$(OBJEXT) \
3.91 - dreamcast.$(OBJEXT) eventq.$(OBJEXT) sh4.$(OBJEXT) \
3.92 - intc.$(OBJEXT) sh4mem.$(OBJEXT) timer.$(OBJEXT) dmac.$(OBJEXT) \
3.93 - mmu.$(OBJEXT) sh4core.$(OBJEXT) sh4dasm.$(OBJEXT) \
3.94 - sh4mmio.$(OBJEXT) scif.$(OBJEXT) sh4stat.$(OBJEXT) \
3.95 - xltcache.$(OBJEXT) pmm.$(OBJEXT) armcore.$(OBJEXT) \
3.96 - armdasm.$(OBJEXT) armmem.$(OBJEXT) aica.$(OBJEXT) \
3.97 - audio.$(OBJEXT) pvr2.$(OBJEXT) pvr2mem.$(OBJEXT) \
3.98 - tacore.$(OBJEXT) rendsort.$(OBJEXT) texcache.$(OBJEXT) \
3.99 - yuv.$(OBJEXT) rendsave.$(OBJEXT) scene.$(OBJEXT) \
3.100 - gl_sl.$(OBJEXT) gl_slsrc.$(OBJEXT) glutil.$(OBJEXT) \
3.101 - glrender.$(OBJEXT) maple.$(OBJEXT) controller.$(OBJEXT) \
3.102 - kbd.$(OBJEXT) mouse.$(OBJEXT) lightgun.$(OBJEXT) \
3.103 - loader.$(OBJEXT) bootstrap.$(OBJEXT) util.$(OBJEXT) \
3.104 - gdlist.$(OBJEXT) display.$(OBJEXT) audio_null.$(OBJEXT) \
3.105 - video_null.$(OBJEXT) video_gl.$(OBJEXT) gl_fbo.$(OBJEXT) \
3.106 - $(am__objects_1) $(am__objects_2) $(am__objects_3) \
3.107 - $(am__objects_4) $(am__objects_5) $(am__objects_6) \
3.108 - $(am__objects_7) $(am__objects_8) $(am__objects_9) \
3.109 - $(am__objects_10) $(am__objects_11) $(am__objects_12) \
3.110 - $(am__objects_13) $(am__objects_14)
3.111 + mem.$(OBJEXT) sdram.$(OBJEXT) paths.$(OBJEXT) watch.$(OBJEXT) \
3.112 + asic.$(OBJEXT) syscall.$(OBJEXT) bios.$(OBJEXT) \
3.113 + dcload.$(OBJEXT) ide.$(OBJEXT) gdimage.$(OBJEXT) \
3.114 + gdrom.$(OBJEXT) nrg.$(OBJEXT) cdi.$(OBJEXT) gdi.$(OBJEXT) \
3.115 + edc_ecc.$(OBJEXT) mmc.$(OBJEXT) dreamcast.$(OBJEXT) \
3.116 + eventq.$(OBJEXT) sh4.$(OBJEXT) intc.$(OBJEXT) sh4mem.$(OBJEXT) \
3.117 + timer.$(OBJEXT) dmac.$(OBJEXT) mmu.$(OBJEXT) sh4core.$(OBJEXT) \
3.118 + sh4dasm.$(OBJEXT) sh4mmio.$(OBJEXT) scif.$(OBJEXT) \
3.119 + sh4stat.$(OBJEXT) xltcache.$(OBJEXT) pmm.$(OBJEXT) \
3.120 + cache.$(OBJEXT) armcore.$(OBJEXT) armdasm.$(OBJEXT) \
3.121 + armmem.$(OBJEXT) aica.$(OBJEXT) audio.$(OBJEXT) pvr2.$(OBJEXT) \
3.122 + pvr2mem.$(OBJEXT) tacore.$(OBJEXT) rendsort.$(OBJEXT) \
3.123 + texcache.$(OBJEXT) yuv.$(OBJEXT) rendsave.$(OBJEXT) \
3.124 + scene.$(OBJEXT) gl_sl.$(OBJEXT) gl_slsrc.$(OBJEXT) \
3.125 + glutil.$(OBJEXT) glrender.$(OBJEXT) maple.$(OBJEXT) \
3.126 + controller.$(OBJEXT) kbd.$(OBJEXT) mouse.$(OBJEXT) \
3.127 + lightgun.$(OBJEXT) loader.$(OBJEXT) bootstrap.$(OBJEXT) \
3.128 + util.$(OBJEXT) gdlist.$(OBJEXT) display.$(OBJEXT) \
3.129 + audio_null.$(OBJEXT) video_null.$(OBJEXT) video_gl.$(OBJEXT) \
3.130 + gl_fbo.$(OBJEXT) $(am__objects_1) $(am__objects_2) \
3.131 + $(am__objects_3) $(am__objects_4) $(am__objects_5) \
3.132 + $(am__objects_6) $(am__objects_7) $(am__objects_8) \
3.133 + $(am__objects_9) $(am__objects_10) $(am__objects_11) \
3.134 + $(am__objects_12) $(am__objects_13) $(am__objects_14)
3.135 lxdream_OBJECTS = $(am_lxdream_OBJECTS)
3.136 lxdream_DEPENDENCIES = $(am__DEPENDENCIES_1)
3.137 am__test_testsh4x86_SOURCES_DIST = test/testsh4x86.c x86dasm/x86dasm.c \
3.138 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
3.139 - x86dasm/dis-buf.c sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c \
3.140 - sh4/xltcache.c sh4/xltcache.h mem.c util.c sh4/mmu.c
3.141 + x86dasm/dis-buf.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
3.142 + sh4/xltcache.h mem.c util.c
3.143 @BUILD_SH4X86_TRUE@am_test_testsh4x86_OBJECTS = testsh4x86.$(OBJEXT) \
3.144 @BUILD_SH4X86_TRUE@ x86dasm.$(OBJEXT) i386-dis.$(OBJEXT) \
3.145 @BUILD_SH4X86_TRUE@ dis-init.$(OBJEXT) dis-buf.$(OBJEXT) \
3.146 -@BUILD_SH4X86_TRUE@ sh4dasm.$(OBJEXT) sh4trans.$(OBJEXT) \
3.147 -@BUILD_SH4X86_TRUE@ sh4x86.$(OBJEXT) xltcache.$(OBJEXT) \
3.148 -@BUILD_SH4X86_TRUE@ mem.$(OBJEXT) util.$(OBJEXT) mmu.$(OBJEXT)
3.149 +@BUILD_SH4X86_TRUE@ sh4trans.$(OBJEXT) sh4x86.$(OBJEXT) \
3.150 +@BUILD_SH4X86_TRUE@ xltcache.$(OBJEXT) mem.$(OBJEXT) \
3.151 +@BUILD_SH4X86_TRUE@ util.$(OBJEXT)
3.152 test_testsh4x86_OBJECTS = $(am_test_testsh4x86_OBJECTS)
3.153 test_testsh4x86_DEPENDENCIES =
3.154 am__dirstamp = $(am__leading_dot)dirstamp
3.155 @@ -400,18 +403,19 @@
3.156 gendec_SOURCES = tools/gendec.c tools/gendec.h tools/insparse.c tools/actparse.c
3.157 genglsl_SOURCES = tools/genglsl.c
3.158 lxdream_SOURCES = main.c version.c config.c config.h lxdream.h dream.h \
3.159 - gui.h cpu.h hook.h gettext.h mem.c mem.h mmio.h paths.c \
3.160 - watch.c asic.c asic.h clock.h serial.h syscall.c syscall.h \
3.161 - bios.c dcload.c gdrom/ide.c gdrom/ide.h gdrom/packet.h \
3.162 - gdrom/gdimage.c gdrom/gdrom.c gdrom/gdrom.h gdrom/nrg.c \
3.163 - gdrom/cdi.c gdrom/gdi.c gdrom/edc_ecc.c gdrom/ecc.h \
3.164 - gdrom/edc_crctable.h gdrom/edc_encoder.h gdrom/edc_l2sq.h \
3.165 - gdrom/edc_scramble.h gdrom/mmc.c gdrom/gddriver.h dreamcast.c \
3.166 - dreamcast.h eventq.c eventq.h sh4/sh4.c sh4/intc.c sh4/intc.h \
3.167 - sh4/sh4mem.c sh4/timer.c sh4/dmac.c sh4/mmu.c sh4/sh4core.c \
3.168 - sh4/sh4core.h sh4/sh4dasm.c sh4/sh4dasm.h sh4/sh4mmio.c \
3.169 - sh4/sh4mmio.h sh4/scif.c sh4/sh4stat.c sh4/sh4stat.h \
3.170 - sh4/xltcache.c sh4/xltcache.h sh4/sh4.h sh4/dmac.h sh4/pmm.c \
3.171 + gui.h cpu.h hook.h gettext.h mem.c mem.h sdram.c mmio.h \
3.172 + paths.c watch.c asic.c asic.h clock.h serial.h syscall.c \
3.173 + syscall.h bios.c dcload.c gdrom/ide.c gdrom/ide.h \
3.174 + gdrom/packet.h gdrom/gdimage.c gdrom/gdrom.c gdrom/gdrom.h \
3.175 + gdrom/nrg.c gdrom/cdi.c gdrom/gdi.c gdrom/edc_ecc.c \
3.176 + gdrom/ecc.h gdrom/edc_crctable.h gdrom/edc_encoder.h \
3.177 + gdrom/edc_l2sq.h gdrom/edc_scramble.h gdrom/mmc.c \
3.178 + gdrom/gddriver.h dreamcast.c dreamcast.h eventq.c eventq.h \
3.179 + sh4/sh4.c sh4/intc.c sh4/intc.h sh4/sh4mem.c sh4/timer.c \
3.180 + sh4/dmac.c sh4/mmu.c sh4/sh4core.c sh4/sh4core.h sh4/sh4dasm.c \
3.181 + sh4/sh4dasm.h sh4/sh4mmio.c sh4/sh4mmio.h sh4/scif.c \
3.182 + sh4/sh4stat.c sh4/sh4stat.h sh4/xltcache.c sh4/xltcache.h \
3.183 + sh4/sh4.h sh4/dmac.h sh4/pmm.c sh4/cache.c sh4/mmu.h \
3.184 aica/armcore.c aica/armcore.h aica/armdasm.c aica/armdasm.h \
3.185 aica/armmem.c aica/aica.c aica/aica.h aica/audio.c \
3.186 aica/audio.h pvr2/pvr2.c pvr2/pvr2.h pvr2/pvr2mem.c \
3.187 @@ -434,8 +438,8 @@
3.188 @BUILD_SH4X86_TRUE@test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
3.189 @BUILD_SH4X86_TRUE@ x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
3.190 @BUILD_SH4X86_TRUE@ x86dasm/dis-buf.c \
3.191 -@BUILD_SH4X86_TRUE@ sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
3.192 -@BUILD_SH4X86_TRUE@ sh4/xltcache.h mem.c util.c sh4/mmu.c
3.193 +@BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
3.194 +@BUILD_SH4X86_TRUE@ sh4/xltcache.h mem.c util.c
3.195
3.196 lxdream_LDADD = @GLIB_LIBS@ @GTK_LIBS@ @LIBPNG_LIBS@ @PULSE_LIBS@ @ESOUND_LIBS@ @ALSA_LIBS@ $(INTLLIBS)
3.197 gendec_LDADD = @GLIB_LIBS@ @GTK_LIBS@ $(INTLLIBS)
3.198 @@ -543,6 +547,7 @@
3.199 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/audio_pulse.Po@am__quote@
3.200 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bios.Po@am__quote@
3.201 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bootstrap.Po@am__quote@
3.202 +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cache.Po@am__quote@
3.203 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cd_linux.Po@am__quote@
3.204 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cd_none.Po@am__quote@
3.205 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cd_osx.Po@am__quote@
3.206 @@ -596,6 +601,7 @@
3.207 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mem.Po@am__quote@
3.208 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mmc.Po@am__quote@
3.209 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mmu.Po@am__quote@
3.210 +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mmux86.Po@am__quote@
3.211 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mouse.Po@am__quote@
3.212 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nrg.Po@am__quote@
3.213 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/osx_iokit.Po@am__quote@
3.214 @@ -607,6 +613,7 @@
3.215 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rendsort.Po@am__quote@
3.216 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scene.Po@am__quote@
3.217 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scif.Po@am__quote@
3.218 +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sdram.Po@am__quote@
3.219 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh4.Po@am__quote@
3.220 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh4core.Po@am__quote@
3.221 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh4dasm.Po@am__quote@
3.222 @@ -999,6 +1006,20 @@
3.223 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
3.224 @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o pmm.obj `if test -f 'sh4/pmm.c'; then $(CYGPATH_W) 'sh4/pmm.c'; else $(CYGPATH_W) '$(srcdir)/sh4/pmm.c'; fi`
3.225
3.226 +cache.o: sh4/cache.c
3.227 +@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cache.o -MD -MP -MF "$(DEPDIR)/cache.Tpo" -c -o cache.o `test -f 'sh4/cache.c' || echo '$(srcdir)/'`sh4/cache.c; \
3.228 +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/cache.Tpo" "$(DEPDIR)/cache.Po"; else rm -f "$(DEPDIR)/cache.Tpo"; exit 1; fi
3.229 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sh4/cache.c' object='cache.o' libtool=no @AMDEPBACKSLASH@
3.230 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
3.231 +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cache.o `test -f 'sh4/cache.c' || echo '$(srcdir)/'`sh4/cache.c
3.232 +
3.233 +cache.obj: sh4/cache.c
3.234 +@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT cache.obj -MD -MP -MF "$(DEPDIR)/cache.Tpo" -c -o cache.obj `if test -f 'sh4/cache.c'; then $(CYGPATH_W) 'sh4/cache.c'; else $(CYGPATH_W) '$(srcdir)/sh4/cache.c'; fi`; \
3.235 +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/cache.Tpo" "$(DEPDIR)/cache.Po"; else rm -f "$(DEPDIR)/cache.Tpo"; exit 1; fi
3.236 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sh4/cache.c' object='cache.obj' libtool=no @AMDEPBACKSLASH@
3.237 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
3.238 +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o cache.obj `if test -f 'sh4/cache.c'; then $(CYGPATH_W) 'sh4/cache.c'; else $(CYGPATH_W) '$(srcdir)/sh4/cache.c'; fi`
3.239 +
3.240 armcore.o: aica/armcore.c
3.241 @am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT armcore.o -MD -MP -MF "$(DEPDIR)/armcore.Tpo" -c -o armcore.o `test -f 'aica/armcore.c' || echo '$(srcdir)/'`aica/armcore.c; \
3.242 @am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/armcore.Tpo" "$(DEPDIR)/armcore.Po"; else rm -f "$(DEPDIR)/armcore.Tpo"; exit 1; fi
3.243 @@ -1391,6 +1412,20 @@
3.244 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
3.245 @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o sh4trans.obj `if test -f 'sh4/sh4trans.c'; then $(CYGPATH_W) 'sh4/sh4trans.c'; else $(CYGPATH_W) '$(srcdir)/sh4/sh4trans.c'; fi`
3.246
3.247 +mmux86.o: sh4/mmux86.c
3.248 +@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mmux86.o -MD -MP -MF "$(DEPDIR)/mmux86.Tpo" -c -o mmux86.o `test -f 'sh4/mmux86.c' || echo '$(srcdir)/'`sh4/mmux86.c; \
3.249 +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/mmux86.Tpo" "$(DEPDIR)/mmux86.Po"; else rm -f "$(DEPDIR)/mmux86.Tpo"; exit 1; fi
3.250 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sh4/mmux86.c' object='mmux86.o' libtool=no @AMDEPBACKSLASH@
3.251 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
3.252 +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mmux86.o `test -f 'sh4/mmux86.c' || echo '$(srcdir)/'`sh4/mmux86.c
3.253 +
3.254 +mmux86.obj: sh4/mmux86.c
3.255 +@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mmux86.obj -MD -MP -MF "$(DEPDIR)/mmux86.Tpo" -c -o mmux86.obj `if test -f 'sh4/mmux86.c'; then $(CYGPATH_W) 'sh4/mmux86.c'; else $(CYGPATH_W) '$(srcdir)/sh4/mmux86.c'; fi`; \
3.256 +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/mmux86.Tpo" "$(DEPDIR)/mmux86.Po"; else rm -f "$(DEPDIR)/mmux86.Tpo"; exit 1; fi
3.257 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sh4/mmux86.c' object='mmux86.obj' libtool=no @AMDEPBACKSLASH@
3.258 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
3.259 +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mmux86.obj `if test -f 'sh4/mmux86.c'; then $(CYGPATH_W) 'sh4/mmux86.c'; else $(CYGPATH_W) '$(srcdir)/sh4/mmux86.c'; fi`
3.260 +
3.261 x86dasm.o: x86dasm/x86dasm.c
3.262 @am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86dasm.o -MD -MP -MF "$(DEPDIR)/x86dasm.Tpo" -c -o x86dasm.o `test -f 'x86dasm/x86dasm.c' || echo '$(srcdir)/'`x86dasm/x86dasm.c; \
3.263 @am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/x86dasm.Tpo" "$(DEPDIR)/x86dasm.Po"; else rm -f "$(DEPDIR)/x86dasm.Tpo"; exit 1; fi
4.1 --- a/src/aica/aica.c Mon Dec 15 10:44:56 2008 +0000
4.2 +++ b/src/aica/aica.c Tue Jan 13 11:56:28 2009 +0000
4.3 @@ -70,7 +70,6 @@
4.4 register_io_regions( mmio_list_spu );
4.5 MMIO_NOTRACE(AICA0);
4.6 MMIO_NOTRACE(AICA1);
4.7 - arm_mem_init();
4.8 aica_reset();
4.9 }
4.10
4.11 @@ -194,16 +193,18 @@
4.12 */
4.13
4.14 /* Write to channels 0-31 */
4.15 -void mmio_region_AICA0_write( uint32_t reg, uint32_t val )
4.16 +MMIO_REGION_WRITE_FN( AICA0, reg, val )
4.17 {
4.18 + reg &= 0xFFF;
4.19 MMIO_WRITE( AICA0, reg, val );
4.20 aica_write_channel( reg >> 7, reg % 128, val );
4.21 // DEBUG( "AICA0 Write %08X => %08X", val, reg );
4.22 }
4.23
4.24 /* Write to channels 32-64 */
4.25 -void mmio_region_AICA1_write( uint32_t reg, uint32_t val )
4.26 +MMIO_REGION_WRITE_FN( AICA1, reg, val )
4.27 {
4.28 + reg &= 0xFFF;
4.29 MMIO_WRITE( AICA1, reg, val );
4.30 aica_write_channel( (reg >> 7) + 32, reg % 128, val );
4.31 // DEBUG( "AICA1 Write %08X => %08X", val, reg );
4.32 @@ -212,10 +213,11 @@
4.33 /**
4.34 * AICA control registers
4.35 */
4.36 -void mmio_region_AICA2_write( uint32_t reg, uint32_t val )
4.37 +MMIO_REGION_WRITE_FN( AICA2, reg, val )
4.38 {
4.39 uint32_t tmp;
4.40 -
4.41 + reg &= 0xFFF;
4.42 +
4.43 switch( reg ) {
4.44 case AICA_RESET:
4.45 tmp = MMIO_READ( AICA2, AICA_RESET );
4.46 @@ -241,11 +243,12 @@
4.47 }
4.48 }
4.49
4.50 -int32_t mmio_region_AICA2_read( uint32_t reg )
4.51 +MMIO_REGION_READ_FN( AICA2, reg )
4.52 {
4.53 audio_channel_t channel;
4.54 uint32_t channo;
4.55 int32_t val;
4.56 + reg &= 0xFFF;
4.57 switch( reg ) {
4.58 case AICA_CHANSTATE:
4.59 channo = (MMIO_READ( AICA2, AICA_CHANSEL ) >> 8) & 0x3F;
4.60 @@ -266,9 +269,10 @@
4.61 }
4.62 }
4.63
4.64 -int32_t mmio_region_AICARTC_read( uint32_t reg )
4.65 +MMIO_REGION_READ_FN( AICARTC, reg )
4.66 {
4.67 int32_t rv = 0;
4.68 + reg &= 0xFFF;
4.69 switch( reg ) {
4.70 case AICA_RTCHI:
4.71 rv = (aica_state.time_of_day >> 16) & 0xFFFF;
4.72 @@ -281,9 +285,9 @@
4.73 return rv;
4.74 }
4.75
4.76 -
4.77 -void mmio_region_AICARTC_write( uint32_t reg, uint32_t val )
4.78 +MMIO_REGION_WRITE_FN( AICARTC, reg, val )
4.79 {
4.80 + reg &= 0xFFF;
4.81 switch( reg ) {
4.82 case AICA_RTCEN:
4.83 MMIO_WRITE( AICARTC, reg, val&0x01 );
5.1 --- a/src/aica/aica.h Mon Dec 15 10:44:56 2008 +0000
5.2 +++ b/src/aica/aica.h Tue Jan 13 11:56:28 2009 +0000
5.3 @@ -71,6 +71,10 @@
5.4 void aica_event( int event );
5.5 void aica_write_channel( int channel, uint32_t addr, uint32_t val );
5.6
5.7 +extern unsigned char aica_main_ram[];
5.8 +extern unsigned char aica_scratch_ram[];
5.9 +
5.10 +
5.11 /**
5.12 * The AICA core runs at 44100 samples/second, regardless of what we're
5.13 * actually outputing.
6.1 --- a/src/aica/armcore.h Mon Dec 15 10:44:56 2008 +0000
6.2 +++ b/src/aica/armcore.h Tue Jan 13 11:56:28 2009 +0000
6.3 @@ -109,7 +109,6 @@
6.4 void arm_write_byte_user( uint32_t addr, uint32_t val );
6.5 int32_t arm_read_phys_word( uint32_t addr );
6.6 int arm_has_page( uint32_t addr );
6.7 -void arm_mem_init(void);
6.8
6.9 #ifdef __cplusplus
6.10 }
7.1 --- a/src/aica/armmem.c Mon Dec 15 10:44:56 2008 +0000
7.2 +++ b/src/aica/armmem.c Tue Jan 13 11:56:28 2009 +0000
7.3 @@ -1,4 +1,4 @@
7.4 -/**
7.5 +/*`*
7.6 * $Id$
7.7 *
7.8 * Implements the ARM's memory map.
7.9 @@ -22,14 +22,95 @@
7.10 #include "aica.h"
7.11 #include "armcore.h"
7.12
7.13 -unsigned char *arm_mem = NULL;
7.14 -unsigned char *arm_mem_scratch = NULL;
7.15 +unsigned char aica_main_ram[2 MB];
7.16 +unsigned char aica_scratch_ram[8 KB];
7.17
7.18 -void arm_mem_init() {
7.19 - arm_mem = mem_get_region_by_name( MEM_REGION_AUDIO );
7.20 - arm_mem_scratch = mem_get_region_by_name( MEM_REGION_AUDIO_SCRATCH );
7.21 +/*************** ARM memory access function blocks **************/
7.22 +
7.23 +static int32_t FASTCALL ext_audioram_read_long( sh4addr_t addr )
7.24 +{
7.25 + return *((int32_t *)(aica_main_ram + (addr&0x001FFFFF)));
7.26 +}
7.27 +static int32_t FASTCALL ext_audioram_read_word( sh4addr_t addr )
7.28 +{
7.29 + return SIGNEXT16(*((int16_t *)(aica_main_ram + (addr&0x001FFFFF))));
7.30 +}
7.31 +static int32_t FASTCALL ext_audioram_read_byte( sh4addr_t addr )
7.32 +{
7.33 + return SIGNEXT8(*((int16_t *)(aica_main_ram + (addr&0x001FFFFF))));
7.34 +}
7.35 +static void FASTCALL ext_audioram_write_long( sh4addr_t addr, uint32_t val )
7.36 +{
7.37 + *(uint32_t *)(aica_main_ram + (addr&0x001FFFFF)) = val;
7.38 + asic_g2_write_word();
7.39 +}
7.40 +static void FASTCALL ext_audioram_write_word( sh4addr_t addr, uint32_t val )
7.41 +{
7.42 + *(uint16_t *)(aica_main_ram + (addr&0x001FFFFF)) = (uint16_t)val;
7.43 + asic_g2_write_word();
7.44 +}
7.45 +static void FASTCALL ext_audioram_write_byte( sh4addr_t addr, uint32_t val )
7.46 +{
7.47 + *(uint8_t *)(aica_main_ram + (addr&0x001FFFFF)) = (uint8_t)val;
7.48 + asic_g2_write_word();
7.49 +}
7.50 +static void FASTCALL ext_audioram_read_burst( unsigned char *dest, sh4addr_t addr )
7.51 +{
7.52 + memcpy( dest, aica_main_ram+(addr&0x001FFFFF), 32 );
7.53 +}
7.54 +static void FASTCALL ext_audioram_write_burst( sh4addr_t addr, unsigned char *src )
7.55 +{
7.56 + memcpy( aica_main_ram+(addr&0x001FFFFF), src, 32 );
7.57 }
7.58
7.59 +struct mem_region_fn mem_region_audioram = { ext_audioram_read_long, ext_audioram_write_long,
7.60 + ext_audioram_read_word, ext_audioram_write_word,
7.61 + ext_audioram_read_byte, ext_audioram_write_byte,
7.62 + ext_audioram_read_burst, ext_audioram_write_burst };
7.63 +
7.64 +
7.65 +static int32_t FASTCALL ext_audioscratch_read_long( sh4addr_t addr )
7.66 +{
7.67 + return *((int32_t *)(aica_scratch_ram + (addr&0x00001FFF)));
7.68 +}
7.69 +static int32_t FASTCALL ext_audioscratch_read_word( sh4addr_t addr )
7.70 +{
7.71 + return SIGNEXT16(*((int16_t *)(aica_scratch_ram + (addr&0x00001FFF))));
7.72 +}
7.73 +static int32_t FASTCALL ext_audioscratch_read_byte( sh4addr_t addr )
7.74 +{
7.75 + return SIGNEXT8(*((int16_t *)(aica_scratch_ram + (addr&0x00001FFF))));
7.76 +}
7.77 +static void FASTCALL ext_audioscratch_write_long( sh4addr_t addr, uint32_t val )
7.78 +{
7.79 + *(uint32_t *)(aica_scratch_ram + (addr&0x00001FFF)) = val;
7.80 + asic_g2_write_word();
7.81 +}
7.82 +static void FASTCALL ext_audioscratch_write_word( sh4addr_t addr, uint32_t val )
7.83 +{
7.84 + *(uint16_t *)(aica_scratch_ram + (addr&0x00001FFF)) = (uint16_t)val;
7.85 + asic_g2_write_word();
7.86 +}
7.87 +static void FASTCALL ext_audioscratch_write_byte( sh4addr_t addr, uint32_t val )
7.88 +{
7.89 + *(uint8_t *)(aica_scratch_ram + (addr&0x00001FFF)) = (uint8_t)val;
7.90 + asic_g2_write_word();
7.91 +}
7.92 +static void FASTCALL ext_audioscratch_read_burst( unsigned char *dest, sh4addr_t addr )
7.93 +{
7.94 + memcpy( dest, aica_scratch_ram+(addr&0x00001FFF), 32 );
7.95 +}
7.96 +static void FASTCALL ext_audioscratch_write_burst( sh4addr_t addr, unsigned char *src )
7.97 +{
7.98 + memcpy( aica_scratch_ram+(addr&0x00001FFF), src, 32 );
7.99 +}
7.100 +
7.101 +struct mem_region_fn mem_region_audioscratch = { ext_audioscratch_read_long, ext_audioscratch_write_long,
7.102 + ext_audioscratch_read_word, ext_audioscratch_write_word,
7.103 + ext_audioscratch_read_byte, ext_audioscratch_write_byte,
7.104 + ext_audioscratch_read_burst, ext_audioscratch_write_burst };
7.105 +
7.106 +/************************** Local ARM support **************************/
7.107 int arm_has_page( uint32_t addr ) {
7.108 return ( addr < 0x00200000 ||
7.109 (addr >= 0x00800000 && addr <= 0x00805000 ) );
7.110 @@ -37,7 +118,7 @@
7.111
7.112 uint32_t arm_read_long( uint32_t addr ) {
7.113 if( addr < 0x00200000 ) {
7.114 - return *(int32_t *)(arm_mem + addr);
7.115 + return *(int32_t *)(aica_main_ram + addr);
7.116 /* Main sound ram */
7.117 } else {
7.118 uint32_t val;
7.119 @@ -56,7 +137,7 @@
7.120 return val;
7.121 case 0x00803000:
7.122 case 0x00804000:
7.123 - return *(int32_t *)(arm_mem_scratch + addr - 0x00803000);
7.124 + return *(int32_t *)(aica_scratch_ram + addr - 0x00803000);
7.125 }
7.126 }
7.127 ERROR( "Attempted long read to undefined page: %08X at %08X",
7.128 @@ -77,7 +158,7 @@
7.129 {
7.130 if( addr < 0x00200000 ) {
7.131 /* Main sound ram */
7.132 - *(uint32_t *)(arm_mem + addr) = value;
7.133 + *(uint32_t *)(aica_main_ram + addr) = value;
7.134 } else {
7.135 switch( addr & 0xFFFFF000 ) {
7.136 case 0x00800000:
7.137 @@ -94,7 +175,7 @@
7.138 break;
7.139 case 0x00803000:
7.140 case 0x00804000:
7.141 - *(uint32_t *)(arm_mem_scratch + addr - 0x00803000) = value;
7.142 + *(uint32_t *)(aica_scratch_ram + addr - 0x00803000) = value;
7.143 break;
7.144 default:
7.145 ERROR( "Attempted long write to undefined address: %08X",
7.146 @@ -123,7 +204,7 @@
7.147 void arm_write_word( uint32_t addr, uint32_t value )
7.148 {
7.149 if( addr < 0x00200000 ) {
7.150 - *(uint16_t *)(arm_mem + addr) = (uint16_t)value;
7.151 + *(uint16_t *)(aica_main_ram + addr) = (uint16_t)value;
7.152 } else {
7.153
7.154 }
7.155 @@ -132,7 +213,7 @@
7.156 {
7.157 if( addr < 0x00200000 ) {
7.158 /* Main sound ram */
7.159 - *(uint8_t *)(arm_mem + addr) = (uint8_t)value;
7.160 + *(uint8_t *)(aica_main_ram + addr) = (uint8_t)value;
7.161 } else {
7.162 uint32_t tmp;
7.163 switch( addr & 0xFFFFF000 ) {
7.164 @@ -153,7 +234,7 @@
7.165 break;
7.166 case 0x00803000:
7.167 case 0x00804000:
7.168 - *(uint8_t *)(arm_mem_scratch + addr - 0x00803000) = (uint8_t)value;
7.169 + *(uint8_t *)(aica_scratch_ram + addr - 0x00803000) = (uint8_t)value;
7.170 break;
7.171 default:
7.172 ERROR( "Attempted byte write to undefined address: %08X",
8.1 --- a/src/aica/audio.c Mon Dec 15 10:44:56 2008 +0000
8.2 +++ b/src/aica/audio.c Tue Jan 13 11:56:28 2009 +0000
8.3 @@ -68,8 +68,6 @@
8.4
8.5 #define NEXT_BUFFER() ((audio.write_buffer == NUM_BUFFERS-1) ? 0 : audio.write_buffer+1)
8.6
8.7 -extern char *arm_mem;
8.8 -
8.9 /**
8.10 * Preserve audio channel state only - don't bother saving the buffers
8.11 */
8.12 @@ -299,7 +297,7 @@
8.13 switch( channel->sample_format ) {
8.14 case AUDIO_FMT_16BIT:
8.15 for( j=0; j<num_samples; j++ ) {
8.16 - sample = ((int16_t *)(arm_mem + channel->start))[channel->posn];
8.17 + sample = ((int16_t *)(aica_main_ram + channel->start))[channel->posn];
8.18 result_buf[j][0] += sample * vol_left;
8.19 result_buf[j][1] += sample * vol_right;
8.20
8.21 @@ -323,7 +321,7 @@
8.22 break;
8.23 case AUDIO_FMT_8BIT:
8.24 for( j=0; j<num_samples; j++ ) {
8.25 - sample = ((int8_t *)(arm_mem + channel->start))[channel->posn] << 8;
8.26 + sample = ((int8_t *)(aica_main_ram + channel->start))[channel->posn] << 8;
8.27 result_buf[j][0] += sample * vol_left;
8.28 result_buf[j][1] += sample * vol_right;
8.29
8.30 @@ -366,7 +364,7 @@
8.31 break;
8.32 }
8.33 }
8.34 - uint8_t data = ((uint8_t *)(arm_mem + channel->start))[channel->posn>>1];
8.35 + uint8_t data = ((uint8_t *)(aica_main_ram + channel->start))[channel->posn>>1];
8.36 if( channel->posn&1 ) {
8.37 adpcm_yamaha_decode_nibble( channel, (data >> 4) & 0x0F );
8.38 } else {
8.39 @@ -478,7 +476,7 @@
8.40 if( audio.channels[channel].sample_format == AUDIO_FMT_ADPCM ) {
8.41 audio.channels[channel].adpcm_step = 0;
8.42 audio.channels[channel].adpcm_predict = 0;
8.43 - uint8_t data = ((uint8_t *)(arm_mem + audio.channels[channel].start))[0];
8.44 + uint8_t data = ((uint8_t *)(aica_main_ram + audio.channels[channel].start))[0];
8.45 adpcm_yamaha_decode_nibble( &audio.channels[channel], data & 0x0F );
8.46 }
8.47 }
9.1 --- a/src/asic.c Mon Dec 15 10:44:56 2008 +0000
9.2 +++ b/src/asic.c Tue Jan 13 11:56:28 2009 +0000
9.3 @@ -416,8 +416,38 @@
9.4 MMIO_WRITE( ASIC, SORTDMACTL, 0 );
9.5 }
9.6
9.7 -void mmio_region_ASIC_write( uint32_t reg, uint32_t val )
9.8 +MMIO_REGION_READ_FN( ASIC, reg )
9.9 {
9.10 + int32_t val;
9.11 + reg &= 0xFFF;
9.12 + switch( reg ) {
9.13 + case PIRQ0:
9.14 + case PIRQ1:
9.15 + case PIRQ2:
9.16 + case IRQA0:
9.17 + case IRQA1:
9.18 + case IRQA2:
9.19 + case IRQB0:
9.20 + case IRQB1:
9.21 + case IRQB2:
9.22 + case IRQC0:
9.23 + case IRQC1:
9.24 + case IRQC2:
9.25 + case MAPLE_STATE:
9.26 + val = MMIO_READ(ASIC, reg);
9.27 + return val;
9.28 + case G2STATUS:
9.29 + return g2_read_status();
9.30 + default:
9.31 + val = MMIO_READ(ASIC, reg);
9.32 + return val;
9.33 + }
9.34 +
9.35 +}
9.36 +
9.37 +MMIO_REGION_WRITE_FN( ASIC, reg, val )
9.38 +{
9.39 + reg &= 0xFFF;
9.40 switch( reg ) {
9.41 case PIRQ1:
9.42 break; /* Treat this as read-only for the moment */
9.43 @@ -496,36 +526,37 @@
9.44 }
9.45 }
9.46
9.47 -int32_t mmio_region_ASIC_read( uint32_t reg )
9.48 +MMIO_REGION_READ_FN( EXTDMA, reg )
9.49 {
9.50 - int32_t val;
9.51 + uint32_t val;
9.52 + reg &= 0xFFF;
9.53 + if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
9.54 + return 0xFFFFFFFF; /* disabled */
9.55 + }
9.56 +
9.57 switch( reg ) {
9.58 - case PIRQ0:
9.59 - case PIRQ1:
9.60 - case PIRQ2:
9.61 - case IRQA0:
9.62 - case IRQA1:
9.63 - case IRQA2:
9.64 - case IRQB0:
9.65 - case IRQB1:
9.66 - case IRQB2:
9.67 - case IRQC0:
9.68 - case IRQC1:
9.69 - case IRQC2:
9.70 - case MAPLE_STATE:
9.71 - val = MMIO_READ(ASIC, reg);
9.72 - return val;
9.73 - case G2STATUS:
9.74 - return g2_read_status();
9.75 + case IDEALTSTATUS:
9.76 + val = idereg.status;
9.77 + return val;
9.78 + case IDEDATA: return ide_read_data_pio( );
9.79 + case IDEFEAT: return idereg.error;
9.80 + case IDECOUNT:return idereg.count;
9.81 + case IDELBA0: return ide_get_drive_status();
9.82 + case IDELBA1: return idereg.lba1;
9.83 + case IDELBA2: return idereg.lba2;
9.84 + case IDEDEV: return idereg.device;
9.85 + case IDECMD:
9.86 + val = ide_read_status();
9.87 + return val;
9.88 default:
9.89 - val = MMIO_READ(ASIC, reg);
9.90 + val = MMIO_READ( EXTDMA, reg );
9.91 return val;
9.92 }
9.93 -
9.94 }
9.95
9.96 MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
9.97 {
9.98 + reg &= 0xFFF;
9.99 if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
9.100 return; /* disabled */
9.101 }
9.102 @@ -645,30 +676,3 @@
9.103 }
9.104 }
9.105
9.106 -MMIO_REGION_READ_FN( EXTDMA, reg )
9.107 -{
9.108 - uint32_t val;
9.109 - if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
9.110 - return 0xFFFFFFFF; /* disabled */
9.111 - }
9.112 -
9.113 - switch( reg ) {
9.114 - case IDEALTSTATUS:
9.115 - val = idereg.status;
9.116 - return val;
9.117 - case IDEDATA: return ide_read_data_pio( );
9.118 - case IDEFEAT: return idereg.error;
9.119 - case IDECOUNT:return idereg.count;
9.120 - case IDELBA0: return ide_get_drive_status();
9.121 - case IDELBA1: return idereg.lba1;
9.122 - case IDELBA2: return idereg.lba2;
9.123 - case IDEDEV: return idereg.device;
9.124 - case IDECMD:
9.125 - val = ide_read_status();
9.126 - return val;
9.127 - default:
9.128 - val = MMIO_READ( EXTDMA, reg );
9.129 - return val;
9.130 - }
9.131 -}
9.132 -
10.1 --- a/src/dreamcast.c Mon Dec 15 10:44:56 2008 +0000
10.2 +++ b/src/dreamcast.c Tue Jan 13 11:56:28 2009 +0000
10.3 @@ -29,6 +29,7 @@
10.4 #include "aica/aica.h"
10.5 #include "gdrom/ide.h"
10.6 #include "maple/maple.h"
10.7 +#include "pvr2/pvr2.h"
10.8 #include "sh4/sh4.h"
10.9 #include "sh4/sh4core.h"
10.10
10.11 @@ -58,6 +59,22 @@
10.12 struct dreamcast_module unknown_module = { "****", NULL, NULL, NULL, NULL,
10.13 NULL, NULL, NULL };
10.14
10.15 +extern struct mem_region_fn mem_region_sdram;
10.16 +extern struct mem_region_fn mem_region_vram32;
10.17 +extern struct mem_region_fn mem_region_vram64;
10.18 +extern struct mem_region_fn mem_region_audioram;
10.19 +extern struct mem_region_fn mem_region_audioscratch;
10.20 +extern struct mem_region_fn mem_region_flashram;
10.21 +extern struct mem_region_fn mem_region_bootrom;
10.22 +extern struct mem_region_fn mem_region_pvr2ta;
10.23 +extern struct mem_region_fn mem_region_pvr2yuv;
10.24 +extern struct mem_region_fn mem_region_pvr2vdma1;
10.25 +extern struct mem_region_fn mem_region_pvr2vdma2;
10.26 +
10.27 +unsigned char dc_main_ram[16 MB];
10.28 +unsigned char dc_boot_rom[2 MB];
10.29 +unsigned char dc_flash_ram[128 KB];
10.30 +
10.31 /**
10.32 * This function is responsible for defining how all the pieces of the
10.33 * dreamcast actually fit together.
10.34 @@ -75,12 +92,19 @@
10.35 dreamcast_register_module( &mem_module );
10.36
10.37 /* Setup standard memory map */
10.38 - mem_create_repeating_ram_region( 0x0C000000, 16 MB, MEM_REGION_MAIN, 0x01000000, 0x0F000000 );
10.39 - mem_create_ram_region( 0x00800000, 2 MB, MEM_REGION_AUDIO );
10.40 - mem_create_ram_region( 0x00703000, 8 KB, MEM_REGION_AUDIO_SCRATCH );
10.41 - mem_create_ram_region( 0x05000000, 8 MB, MEM_REGION_VIDEO );
10.42 - dreamcast_has_bios = mem_load_rom( bios_path, 0x00000000, 0x00200000, 0x89f2b1a1, MEM_REGION_BIOS );
10.43 - mem_create_ram_region( 0x00200000, 0x00020000, MEM_REGION_FLASH );
10.44 + mem_map_region( dc_boot_rom, 0x00000000, 2 MB, MEM_REGION_BIOS, &mem_region_bootrom, MEM_FLAG_ROM, 2 MB, 0 );
10.45 + mem_map_region( dc_flash_ram, 0x00200000, 128 KB, MEM_REGION_FLASH, &mem_region_flashram, MEM_FLAG_RAM, 128 KB, 0 );
10.46 + mem_map_region( aica_main_ram, 0x00800000, 2 MB, MEM_REGION_AUDIO, &mem_region_audioram, MEM_FLAG_RAM, 2 MB, 0 );
10.47 + mem_map_region( aica_scratch_ram,0x00703000, 8 KB, MEM_REGION_AUDIO_SCRATCH,&mem_region_audioscratch, MEM_FLAG_RAM, 8 KB, 0 );
10.48 + mem_map_region( NULL, 0x04000000, 8 MB, MEM_REGION_VIDEO64, &mem_region_vram64, 0, 8 MB, 0 );
10.49 + mem_map_region( pvr2_main_ram, 0x05000000, 8 MB, MEM_REGION_VIDEO, &mem_region_vram32, MEM_FLAG_RAM, 8 MB, 0 );
10.50 + mem_map_region( dc_main_ram, 0x0C000000, 16 MB, MEM_REGION_MAIN, &mem_region_sdram, MEM_FLAG_RAM, 0x01000000, 0x0F000000 );
10.51 + mem_map_region( NULL, 0x10000000, 8 MB, MEM_REGION_PVR2TA, &mem_region_pvr2ta, 0, 0x02000000, 0x12000000 );
10.52 + mem_map_region( NULL, 0x10800000, 8 MB, MEM_REGION_PVR2YUV, &mem_region_pvr2yuv, 0, 0x02000000, 0x12800000 );
10.53 + mem_map_region( NULL, 0x11000000, 16 MB, MEM_REGION_PVR2VDMA1, &mem_region_pvr2vdma1, 0, 16 MB, 0 );
10.54 + mem_map_region( NULL, 0x13000000, 16 MB, MEM_REGION_PVR2VDMA2, &mem_region_pvr2vdma2, 0, 16 MB, 0 );
10.55 +
10.56 + dreamcast_has_bios = mem_load_rom( dc_boot_rom, bios_path, 2 MB, 0x89f2b1a1 );
10.57 if( flash_path != NULL && flash_path[0] != '\0' ) {
10.58 mem_load_block( flash_path, 0x00200000, 0x00020000 );
10.59 }
10.60 @@ -99,7 +123,7 @@
10.61 {
10.62 const char *bios_path = lxdream_get_config_value(CONFIG_BIOS_PATH);
10.63 const char *flash_path = lxdream_get_config_value(CONFIG_FLASH_PATH);
10.64 - dreamcast_has_bios = mem_load_rom( bios_path, 0x00000000, 0x00200000, 0x89f2b1a1, MEM_REGION_BIOS );
10.65 + dreamcast_has_bios = mem_load_rom( dc_boot_rom, bios_path, 2 MB, 0x89f2b1a1 );
10.66 if( flash_path != NULL && flash_path[0] != '\0' ) {
10.67 mem_load_block( flash_path, 0x00200000, 0x00020000 );
10.68 }
10.69 @@ -120,8 +144,8 @@
10.70 void dreamcast_configure_aica_only( )
10.71 {
10.72 dreamcast_register_module( &mem_module );
10.73 - mem_create_ram_region( 0x00800000, 2 MB, MEM_REGION_AUDIO );
10.74 - mem_create_ram_region( 0x00703000, 8 KB, MEM_REGION_AUDIO_SCRATCH );
10.75 + mem_map_region( aica_main_ram, 0x00800000, 2 MB, MEM_REGION_AUDIO, &mem_region_audioram, MEM_FLAG_RAM, 2 MB, 0 );
10.76 + mem_map_region( aica_scratch_ram, 0x00703000, 8 KB, MEM_REGION_AUDIO_SCRATCH, &mem_region_audioscratch, MEM_FLAG_RAM, 8 KB, 0 );
10.77 dreamcast_register_module( &aica_module );
10.78 aica_enable();
10.79 dreamcast_state = STATE_STOPPED;
10.80 @@ -253,6 +277,7 @@
10.81 dreamcast_program_name = g_strdup(name);
10.82 dreamcast_entry_point = entry_point;
10.83 sh4_set_pc(entry_point);
10.84 + sh4_write_sr( sh4_read_sr() & (~SR_BL) ); /* Unmask interrupts */
10.85 bios_install();
10.86 dcload_install();
10.87 gui_update_state();
10.88 @@ -467,3 +492,69 @@
10.89 return 0;
10.90 }
10.91
10.92 +/********************* The Boot ROM address space **********************/
10.93 +static int32_t FASTCALL ext_bootrom_read_long( sh4addr_t addr )
10.94 +{
10.95 + return *((int32_t *)(dc_boot_rom + (addr&0x001FFFFF)));
10.96 +}
10.97 +static int32_t FASTCALL ext_bootrom_read_word( sh4addr_t addr )
10.98 +{
10.99 + return SIGNEXT16(*((int16_t *)(dc_boot_rom + (addr&0x001FFFFF))));
10.100 +}
10.101 +static int32_t FASTCALL ext_bootrom_read_byte( sh4addr_t addr )
10.102 +{
10.103 + return SIGNEXT8(*((int16_t *)(dc_boot_rom + (addr&0x001FFFFF))));
10.104 +}
10.105 +static void FASTCALL ext_bootrom_read_burst( unsigned char *dest, sh4addr_t addr )
10.106 +{
10.107 + memcpy( dest, dc_boot_rom +(addr&0x001FFFFF), 32 );
10.108 +}
10.109 +
10.110 +struct mem_region_fn mem_region_bootrom = {
10.111 + ext_bootrom_read_long, unmapped_write_long,
10.112 + ext_bootrom_read_word, unmapped_write_long,
10.113 + ext_bootrom_read_byte, unmapped_write_long,
10.114 + ext_bootrom_read_burst, unmapped_write_burst };
10.115 +
10.116 +/********************* The Flash RAM address space **********************/
10.117 +static int32_t FASTCALL ext_flashram_read_long( sh4addr_t addr )
10.118 +{
10.119 + return *((int32_t *)(dc_flash_ram + (addr&0x0001FFFF)));
10.120 +}
10.121 +static int32_t FASTCALL ext_flashram_read_word( sh4addr_t addr )
10.122 +{
10.123 + return SIGNEXT16(*((int16_t *)(dc_flash_ram + (addr&0x0001FFFF))));
10.124 +}
10.125 +static int32_t FASTCALL ext_flashram_read_byte( sh4addr_t addr )
10.126 +{
10.127 + return SIGNEXT8(*((int16_t *)(dc_flash_ram + (addr&0x0001FFFF))));
10.128 +}
10.129 +static void FASTCALL ext_flashram_write_long( sh4addr_t addr, uint32_t val )
10.130 +{
10.131 + *(uint32_t *)(dc_flash_ram + (addr&0x0001FFFF)) = val;
10.132 + asic_g2_write_word();
10.133 +}
10.134 +static void FASTCALL ext_flashram_write_word( sh4addr_t addr, uint32_t val )
10.135 +{
10.136 + *(uint16_t *)(dc_flash_ram + (addr&0x0001FFFF)) = (uint16_t)val;
10.137 + asic_g2_write_word();
10.138 +}
10.139 +static void FASTCALL ext_flashram_write_byte( sh4addr_t addr, uint32_t val )
10.140 +{
10.141 + *(uint8_t *)(dc_flash_ram + (addr&0x0001FFFF)) = (uint8_t)val;
10.142 + asic_g2_write_word();
10.143 +}
10.144 +static void FASTCALL ext_flashram_read_burst( unsigned char *dest, sh4addr_t addr )
10.145 +{
10.146 + memcpy( dest, dc_flash_ram+(addr&0x0001FFFF), 32 );
10.147 +}
10.148 +static void FASTCALL ext_flashram_write_burst( sh4addr_t addr, unsigned char *src )
10.149 +{
10.150 + memcpy( dc_flash_ram+(addr&0x0001FFFF), src, 32 );
10.151 +}
10.152 +
10.153 +struct mem_region_fn mem_region_flashram = { ext_flashram_read_long, ext_flashram_write_long,
10.154 + ext_flashram_read_word, ext_flashram_write_word,
10.155 + ext_flashram_read_byte, ext_flashram_write_byte,
10.156 + ext_flashram_read_burst, ext_flashram_write_burst };
10.157 +
11.1 --- a/src/dreamcast.h Mon Dec 15 10:44:56 2008 +0000
11.2 +++ b/src/dreamcast.h Tue Jan 13 11:56:28 2009 +0000
11.3 @@ -61,7 +61,7 @@
11.4 void dreamcast_program_loaded( const gchar *name, sh4addr_t entry_point );
11.5
11.6 #define DREAMCAST_SAVE_MAGIC "%!-lxDream!Save\0"
11.7 -#define DREAMCAST_SAVE_VERSION 0x00010005
11.8 +#define DREAMCAST_SAVE_VERSION 0x00010006
11.9
11.10 int dreamcast_save_state( const gchar *filename );
11.11 int dreamcast_load_state( const gchar *filename );
11.12 @@ -78,6 +78,10 @@
11.13 #define SCENE_SAVE_MAGIC "%!-lxDream!Scene"
11.14 #define SCENE_SAVE_VERSION 0x00010000
11.15
11.16 +extern unsigned char dc_main_ram[];
11.17 +extern unsigned char dc_boot_rom[];
11.18 +extern unsigned char dc_flash_ram[];
11.19 +
11.20 #ifdef __cplusplus
11.21 }
11.22 #endif
12.1 --- a/src/lxdream.h Mon Dec 15 10:44:56 2008 +0000
12.2 +++ b/src/lxdream.h Tue Jan 13 11:56:28 2009 +0000
12.3 @@ -92,7 +92,7 @@
12.4 const char *get_locale_path();
12.5
12.6 #ifdef HAVE_FASTCALL
12.7 -#define FASTCALL __attribute__((regparm(3)))
12.8 +#define FASTCALL __attribute__((regparm(2)))
12.9 #else
12.10 #define FASTCALL
12.11 #endif
13.1 --- a/src/mem.c Mon Dec 15 10:44:56 2008 +0000
13.2 +++ b/src/mem.c Tue Jan 13 11:56:28 2009 +0000
13.3 @@ -35,7 +35,14 @@
13.4 #include "mmio.h"
13.5 #include "dreamcast.h"
13.6
13.7 +#ifndef PAGE_SIZE
13.8 +#define PAGE_SIZE 4096
13.9 +#endif
13.10 +
13.11 sh4ptr_t *page_map = NULL;
13.12 +mem_region_fn_t *ext_address_space = NULL;
13.13 +
13.14 +extern struct mem_region_fn mem_region_unmapped;
13.15
13.16 int mem_load(FILE *f);
13.17 void mem_save(FILE *f);
13.18 @@ -48,6 +55,41 @@
13.19
13.20 uint32_t num_io_rgns = 0, num_mem_rgns = 0;
13.21
13.22 +DEFINE_HOOK( mem_page_remapped_hook, mem_page_remapped_hook_t );
13.23 +static void mem_page_remapped( sh4addr_t addr, mem_region_fn_t fn )
13.24 +{
13.25 + CALL_HOOKS( mem_page_remapped_hook, addr, fn );
13.26 +}
13.27 +
13.28 +/********************* The "unmapped" address space ********************/
13.29 +/* Always reads as 0, writes have no effect */
13.30 +int32_t FASTCALL unmapped_read_long( sh4addr_t addr )
13.31 +{
13.32 + return 0;
13.33 +}
13.34 +void FASTCALL unmapped_write_long( sh4addr_t addr, uint32_t val )
13.35 +{
13.36 +}
13.37 +void FASTCALL unmapped_read_burst( unsigned char *dest, sh4addr_t addr )
13.38 +{
13.39 + memset( dest, 0, 32 );
13.40 +}
13.41 +void FASTCALL unmapped_write_burst( sh4addr_t addr, unsigned char *src )
13.42 +{
13.43 +}
13.44 +
13.45 +void FASTCALL unmapped_prefetch( sh4addr_t addr )
13.46 +{
13.47 + /* No effect */
13.48 +}
13.49 +
13.50 +struct mem_region_fn mem_region_unmapped = {
13.51 + unmapped_read_long, unmapped_write_long,
13.52 + unmapped_read_long, unmapped_write_long,
13.53 + unmapped_read_long, unmapped_write_long,
13.54 + unmapped_read_burst, unmapped_write_burst,
13.55 + unmapped_prefetch };
13.56 +
13.57 void *mem_alloc_pages( int n )
13.58 {
13.59 void *mem = mmap( NULL, n * 4096,
13.60 @@ -59,18 +101,39 @@
13.61 return mem;
13.62 }
13.63
13.64 +void mem_unprotect( void *region, uint32_t size )
13.65 +{
13.66 + /* Force page alignment */
13.67 + uintptr_t i = (uintptr_t)region;
13.68 + uintptr_t mask = ~(PAGE_SIZE-1);
13.69 + void *ptr = (void *)(i & mask);
13.70 + size_t len = i & (PAGE_SIZE-1) + size;
13.71 + len = (len + (PAGE_SIZE-1)) & mask;
13.72 +
13.73 + int status = mprotect( ptr, len, PROT_READ|PROT_WRITE|PROT_EXEC );
13.74 + assert( status == 0 );
13.75 +}
13.76
13.77 void mem_init( void )
13.78 {
13.79 + int i;
13.80 + mem_region_fn_t *ptr;
13.81 page_map = mmap( NULL, sizeof(sh4ptr_t) * LXDREAM_PAGE_TABLE_ENTRIES,
13.82 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0 );
13.83 if( page_map == MAP_FAILED ) {
13.84 - ERROR( "Unable to allocate page map! (%s)", strerror(errno) );
13.85 - page_map = NULL;
13.86 - return;
13.87 + FATAL( "Unable to allocate page map! (%s)", strerror(errno) );
13.88 + }
13.89 + memset( page_map, 0, sizeof(sh4ptr_t) * LXDREAM_PAGE_TABLE_ENTRIES );
13.90 +
13.91 + ext_address_space = mmap( NULL, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES,
13.92 + PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0 );
13.93 + if( ext_address_space == MAP_FAILED ) {
13.94 + FATAL( "Unable to allocate external memory map (%s)", strerror(errno) );
13.95 }
13.96
13.97 - memset( page_map, 0, sizeof(sh4ptr_t) * LXDREAM_PAGE_TABLE_ENTRIES );
13.98 + for( ptr = ext_address_space, i = LXDREAM_PAGE_TABLE_ENTRIES; i > 0; ptr++, i-- ) {
13.99 + *ptr = &mem_region_unmapped;
13.100 + }
13.101 }
13.102
13.103 void mem_reset( void )
13.104 @@ -81,7 +144,7 @@
13.105 for( j=0; io_rgn[i]->ports[j].id != NULL; j++ ) {
13.106 if( io_rgn[i]->ports[j].def_val != UNDEFINED &&
13.107 io_rgn[i]->ports[j].def_val != *io_rgn[i]->ports[j].val ) {
13.108 - io_rgn[i]->io_write( io_rgn[i]->ports[j].offset,
13.109 + io_rgn[i]->fn.write_long( io_rgn[i]->ports[j].offset,
13.110 io_rgn[i]->ports[j].def_val );
13.111 }
13.112 }
13.113 @@ -90,18 +153,26 @@
13.114
13.115 void mem_save( FILE *f )
13.116 {
13.117 - int i;
13.118 + int i, num_ram_regions = 0;
13.119 uint32_t len;
13.120
13.121 - /* All memory regions */
13.122 - fwrite( &num_mem_rgns, sizeof(num_mem_rgns), 1, f );
13.123 + /* All RAM regions (ROM and non-memory regions don't need to be saved)
13.124 + * Flash is questionable - for now we save it too */
13.125 for( i=0; i<num_mem_rgns; i++ ) {
13.126 - fwrite_string( mem_rgn[i].name, f );
13.127 - fwrite( &mem_rgn[i].base, sizeof(uint32_t), 1, f );
13.128 - fwrite( &mem_rgn[i].flags, sizeof(uint32_t), 1, f );
13.129 - fwrite( &mem_rgn[i].size, sizeof(uint32_t), 1, f );
13.130 - if( mem_rgn[i].flags != MEM_FLAG_ROM )
13.131 + if( mem_rgn[i].flags == MEM_FLAG_RAM ) {
13.132 + num_ram_regions++;
13.133 + }
13.134 + }
13.135 + fwrite( &num_ram_regions, sizeof(num_ram_regions), 1, f );
13.136 +
13.137 + for( i=0; i<num_mem_rgns; i++ ) {
13.138 + if( mem_rgn[i].flags == MEM_FLAG_RAM ) {
13.139 + fwrite_string( mem_rgn[i].name, f );
13.140 + fwrite( &mem_rgn[i].base, sizeof(uint32_t), 1, f );
13.141 + fwrite( &mem_rgn[i].flags, sizeof(uint32_t), 1, f );
13.142 + fwrite( &mem_rgn[i].size, sizeof(uint32_t), 1, f );
13.143 fwrite_gzip( mem_rgn[i].mem, mem_rgn[i].size, 1, f );
13.144 + }
13.145 }
13.146
13.147 /* All MMIO regions */
13.148 @@ -121,32 +192,51 @@
13.149 uint32_t len;
13.150 uint32_t base, size;
13.151 uint32_t flags;
13.152 - int i;
13.153 + int i, j;
13.154 + int mem_region_loaded[MAX_MEM_REGIONS];
13.155
13.156 - /* All memory regions */
13.157 + /* All RAM regions */
13.158 + memset( mem_region_loaded, 0, sizeof(mem_region_loaded) );
13.159 fread( &len, sizeof(len), 1, f );
13.160 - if( len != num_mem_rgns )
13.161 - return -1;
13.162 for( i=0; i<len; i++ ) {
13.163 fread_string( tmp, sizeof(tmp), f );
13.164 - fread( &base, sizeof(base), 1, f );
13.165 - fread( &flags, sizeof(flags), 1, f );
13.166 - fread( &size, sizeof(size), 1, f );
13.167 - if( strcmp( mem_rgn[i].name, tmp ) != 0 ||
13.168 - base != mem_rgn[i].base ||
13.169 - flags != mem_rgn[i].flags ||
13.170 - size != mem_rgn[i].size ) {
13.171 - ERROR( "Bad memory region %d %s", i, tmp );
13.172 +
13.173 + for( j=0; j<num_mem_rgns; j++ ) {
13.174 + if( strcasecmp( mem_rgn[j].name, tmp ) == 0 ) {
13.175 + fread( &base, sizeof(base), 1, f );
13.176 + fread( &flags, sizeof(flags), 1, f );
13.177 + fread( &size, sizeof(size), 1, f );
13.178 + if( base != mem_rgn[j].base ||
13.179 + flags != mem_rgn[j].flags ||
13.180 + size != mem_rgn[j].size ) {
13.181 + ERROR( "Bad memory block %d %s (not mapped to expected region)", i, tmp );
13.182 + return -1;
13.183 + }
13.184 + if( flags != MEM_FLAG_RAM ) {
13.185 + ERROR( "Unexpected memory block %d %s (Not a RAM region)", i, tmp );
13.186 + return -1;
13.187 + }
13.188 + fread_gzip( mem_rgn[j].mem, size, 1, f );
13.189 + mem_region_loaded[j] = 1;
13.190 + }
13.191 + }
13.192 + }
13.193 + /* Make sure we got all the memory regions we expected */
13.194 + for( i=0; i<num_mem_rgns; i++ ) {
13.195 + if( mem_rgn[i].flags == MEM_FLAG_RAM &&
13.196 + mem_region_loaded[i] == 0 ) {
13.197 + ERROR( "Missing memory block %s (not found in save state)", mem_rgn[i].name );
13.198 return -1;
13.199 }
13.200 - if( flags != MEM_FLAG_ROM )
13.201 - fread_gzip( mem_rgn[i].mem, size, 1, f );
13.202 }
13.203
13.204 /* All MMIO regions */
13.205 fread( &len, sizeof(len), 1, f );
13.206 - if( len != num_io_rgns )
13.207 + if( len != num_io_rgns ) {
13.208 + ERROR( "Unexpected IO region count %d (expected %d)", len, num_io_rgns );
13.209 return -1;
13.210 + }
13.211 +
13.212 for( i=0; i<len; i++ ) {
13.213 fread_string( tmp, sizeof(tmp), f );
13.214 fread( &base, sizeof(base), 1, f );
13.215 @@ -225,8 +315,8 @@
13.216 }
13.217
13.218 struct mem_region *mem_map_region( void *mem, uint32_t base, uint32_t size,
13.219 - const char *name, int flags, uint32_t repeat_offset,
13.220 - uint32_t repeat_until )
13.221 + const char *name, mem_region_fn_t fn, int flags,
13.222 + uint32_t repeat_offset, uint32_t repeat_until )
13.223 {
13.224 int i;
13.225 mem_rgn[num_mem_rgns].base = base;
13.226 @@ -234,71 +324,57 @@
13.227 mem_rgn[num_mem_rgns].flags = flags;
13.228 mem_rgn[num_mem_rgns].name = name;
13.229 mem_rgn[num_mem_rgns].mem = mem;
13.230 + mem_rgn[num_mem_rgns].fn = fn;
13.231 + fn->prefetch = unmapped_prefetch;
13.232 num_mem_rgns++;
13.233
13.234 do {
13.235 - for( i=0; i<size>>LXDREAM_PAGE_BITS; i++ )
13.236 - page_map[(base>>LXDREAM_PAGE_BITS)+i] = mem + (i<<LXDREAM_PAGE_BITS);
13.237 + for( i=0; i<size>>LXDREAM_PAGE_BITS; i++ ) {
13.238 + if( mem != NULL ) {
13.239 + page_map[(base>>LXDREAM_PAGE_BITS)+i] = mem + (i<<LXDREAM_PAGE_BITS);
13.240 + }
13.241 + ext_address_space[(base>>LXDREAM_PAGE_BITS)+i] = fn;
13.242 + mem_page_remapped( base + (i<<LXDREAM_PAGE_BITS), fn );
13.243 + }
13.244 base += repeat_offset;
13.245 } while( base <= repeat_until );
13.246
13.247 return &mem_rgn[num_mem_rgns-1];
13.248 }
13.249
13.250 -void *mem_create_ram_region( uint32_t base, uint32_t size, const char *name )
13.251 +gboolean mem_load_rom( void *output, const gchar *file, uint32_t size, uint32_t crc )
13.252 {
13.253 - return mem_create_repeating_ram_region( base, size, name, size, base );
13.254 -}
13.255 + if( file != NULL && file[0] != '\0' ) {
13.256 + FILE *f = fopen(file,"r");
13.257 + struct stat st;
13.258 + uint32_t calc_crc;
13.259
13.260 -void *mem_create_repeating_ram_region( uint32_t base, uint32_t size, const char *name,
13.261 - uint32_t repeat_offset, uint32_t repeat_until )
13.262 -{
13.263 - char *mem;
13.264 -
13.265 - assert( (base&0xFFFFF000) == base ); /* must be page aligned */
13.266 - assert( (size&0x00000FFF) == 0 );
13.267 - assert( num_mem_rgns < MAX_MEM_REGIONS );
13.268 - assert( page_map != NULL );
13.269 -
13.270 - mem = mem_alloc_pages( size>>LXDREAM_PAGE_BITS );
13.271 -
13.272 - mem_map_region( mem, base, size, name, MEM_FLAG_RAM, repeat_offset, repeat_until );
13.273 -
13.274 - return mem;
13.275 -}
13.276 -
13.277 -gboolean mem_load_rom( const gchar *file, uint32_t base, uint32_t size, uint32_t crc,
13.278 - const gchar *region_name )
13.279 -{
13.280 - sh4ptr_t mem;
13.281 - uint32_t calc_crc;
13.282 - int status;
13.283 -
13.284 - mem = mem_get_region(base);
13.285 - if( mem == NULL ) {
13.286 - mem = mmap( NULL, size, PROT_WRITE|PROT_READ, MAP_ANON|MAP_PRIVATE, -1, 0 );
13.287 - if( mem == MAP_FAILED ) {
13.288 - ERROR( "Unable to allocate ROM memory: %s (%s)", file, strerror(errno) );
13.289 + if( f == NULL ) {
13.290 + ERROR( "Unable to load file '%s': %s", file, strerror(errno) );
13.291 return FALSE;
13.292 }
13.293 - mem_map_region( mem, base, size, region_name, MEM_FLAG_ROM, size, base );
13.294 - } else {
13.295 - mprotect( mem, size, PROT_READ|PROT_WRITE );
13.296 - }
13.297
13.298 - if( file != NULL && file[0] != '\0' ) {
13.299 - status = mem_load_block( file, base, size );
13.300 - mprotect( mem, size, PROT_READ );
13.301 + fstat( fileno(f), &st );
13.302 + if( st.st_size != size ) {
13.303 + ERROR( "File '%s' is invalid, expected %d bytes but was %d bytes long.", file, size, st.st_size );
13.304 + fclose(f);
13.305 + return FALSE;
13.306 + }
13.307 +
13.308 + if( fread( output, 1, size, f ) != size ) {
13.309 + ERROR( "Failed to load file '%s': %s", file, strerror(errno) );
13.310 + fclose(f);
13.311 + return FALSE;
13.312 + }
13.313
13.314 - if( status == 0 ) {
13.315 - /* CRC check only if we loaded something */
13.316 - calc_crc = crc32(0L, (sh4ptr_t)mem, size);
13.317 - if( calc_crc != crc ) {
13.318 - WARN( "Bios CRC Mismatch in %s: %08X (expected %08X)",
13.319 - file, calc_crc, crc);
13.320 - }
13.321 - return TRUE;
13.322 + /* CRC check only if we loaded something */
13.323 + calc_crc = crc32(0L, output, size);
13.324 + if( calc_crc != crc ) {
13.325 + WARN( "Bios CRC Mismatch in %s: %08X (expected %08X)",
13.326 + file, calc_crc, crc);
13.327 }
13.328 + /* Even if the CRC fails, continue normally */
13.329 + return TRUE;
13.330 }
13.331 return FALSE;
13.332 }
13.333 @@ -334,6 +410,8 @@
13.334 P4_io[(io->base&0x1FFFFFFF)>>19] = io;
13.335 } else {
13.336 page_map[io->base>>12] = (sh4ptr_t)(uintptr_t)num_io_rgns;
13.337 + ext_address_space[io->base>>12] = &io->fn;
13.338 + mem_page_remapped( io->base, &io->fn );
13.339 }
13.340 io_rgn[num_io_rgns] = io;
13.341 num_io_rgns++;
13.342 @@ -344,16 +422,9 @@
13.343 while( *io ) register_io_region( *io++ );
13.344 }
13.345
13.346 -int mem_has_page( uint32_t addr )
13.347 +gboolean mem_has_page( uint32_t addr )
13.348 {
13.349 - sh4ptr_t page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
13.350 - return page != NULL;
13.351 -}
13.352 -
13.353 -sh4ptr_t mem_get_page( uint32_t addr )
13.354 -{
13.355 - sh4ptr_t page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
13.356 - return page;
13.357 + return ext_address_space[ (addr&0x1FFFFFFF)>>12 ] != &mem_region_unmapped;
13.358 }
13.359
13.360 sh4ptr_t mem_get_region( uint32_t addr )
13.361 @@ -368,22 +439,7 @@
13.362
13.363 void mem_write_long( sh4addr_t addr, uint32_t value )
13.364 {
13.365 - sh4ptr_t ptr = mem_get_region(addr);
13.366 - assert(ptr != NULL);
13.367 - *((uint32_t *)ptr) = value;
13.368 -}
13.369 -
13.370 -struct mmio_region *mem_get_io_region( uint32_t addr )
13.371 -{
13.372 - if( addr > 0xFF000000 ) {
13.373 - return P4_io[(addr&0x00FFFFFF)>>12];
13.374 - }
13.375 - sh4ptr_t page = page_map[(addr&0x1FFFFFFF)>>12];
13.376 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) {
13.377 - return io_rgn[(uintptr_t)page];
13.378 - } else {
13.379 - return NULL;
13.380 - }
13.381 + ext_address_space[(addr&0x1FFFFFFF)>>12]->write_long(addr, value);
13.382 }
13.383
13.384 struct mmio_region *mem_get_io_region_by_name( const gchar *name )
14.1 --- a/src/mem.h Mon Dec 15 10:44:56 2008 +0000
14.2 +++ b/src/mem.h Tue Jan 13 11:56:28 2009 +0000
14.3 @@ -22,45 +22,93 @@
14.4
14.5 #include <stdint.h>
14.6 #include "lxdream.h"
14.7 +#include "hook.h"
14.8
14.9 #ifdef __cplusplus
14.10 extern "C" {
14.11 #endif
14.12
14.13 +
14.14 +typedef FASTCALL int32_t (*mem_read_fn_t)(sh4addr_t);
14.15 +typedef FASTCALL void (*mem_write_fn_t)(sh4addr_t, uint32_t);
14.16 +typedef FASTCALL void (*mem_read_burst_fn_t)(unsigned char *,sh4addr_t);
14.17 +typedef FASTCALL void (*mem_write_burst_fn_t)(sh4addr_t,unsigned char *);
14.18 +typedef FASTCALL void (*mem_prefetch_fn_t)(sh4addr_t);
14.19 +
14.20 +typedef FASTCALL int32_t (*mem_read_exc_fn_t)(sh4addr_t, void *);
14.21 +typedef FASTCALL void (*mem_write_exc_fn_t)(sh4addr_t, uint32_t, void *);
14.22 +typedef FASTCALL void (*mem_read_burst_exc_fn_t)(unsigned char *,sh4addr_t, void *);
14.23 +typedef FASTCALL void (*mem_write_burst_exc_fn_t)(sh4addr_t,unsigned char *, void *);
14.24 +typedef FASTCALL void (*mem_prefetch_exc_fn_t)(sh4addr_t, void *);
14.25 +
14.26 +/**
14.27 + * Basic memory region vtable - read/write at byte, word, long, and burst
14.28 + * (32-byte) sizes.
14.29 + */
14.30 +typedef struct mem_region_fn {
14.31 + mem_read_fn_t read_long;
14.32 + mem_write_fn_t write_long;
14.33 + mem_read_fn_t read_word;
14.34 + mem_write_fn_t write_word;
14.35 + mem_read_fn_t read_byte;
14.36 + mem_write_fn_t write_byte;
14.37 + mem_read_burst_fn_t read_burst;
14.38 + mem_write_burst_fn_t write_burst;
14.39 + /* Prefetch is provided as a convenience for the SH4 - external memory
14.40 + * spaces are automatically forced to unmapped_prefetch by mem.c
14.41 + */
14.42 + mem_prefetch_fn_t prefetch;
14.43 +} *mem_region_fn_t;
14.44 +
14.45 +int32_t FASTCALL unmapped_read_long( sh4addr_t addr );
14.46 +void FASTCALL unmapped_write_long( sh4addr_t addr, uint32_t val );
14.47 +void FASTCALL unmapped_read_burst( unsigned char *dest, sh4addr_t addr );
14.48 +void FASTCALL unmapped_write_burst( sh4addr_t addr, unsigned char *src );
14.49 +void FASTCALL unmapped_prefetch( sh4addr_t addr );
14.50 +extern struct mem_region_fn mem_region_unmapped;
14.51 +
14.52 typedef struct mem_region {
14.53 uint32_t base;
14.54 uint32_t size;
14.55 const char *name;
14.56 sh4ptr_t mem;
14.57 uint32_t flags;
14.58 + mem_region_fn_t fn;
14.59 } *mem_region_t;
14.60
14.61 #define MAX_IO_REGIONS 24
14.62 -#define MAX_MEM_REGIONS 8
14.63 +#define MAX_MEM_REGIONS 16
14.64
14.65 #define MEM_REGION_BIOS "Bios ROM"
14.66 #define MEM_REGION_MAIN "System RAM"
14.67 #define MEM_REGION_VIDEO "Video RAM"
14.68 +#define MEM_REGION_VIDEO64 "Video RAM 64-bit"
14.69 #define MEM_REGION_AUDIO "Audio RAM"
14.70 #define MEM_REGION_AUDIO_SCRATCH "Audio Scratch RAM"
14.71 #define MEM_REGION_FLASH "System Flash"
14.72 +#define MEM_REGION_PVR2TA "PVR2 TA Command"
14.73 +#define MEM_REGION_PVR2YUV "PVR2 YUV Decode"
14.74 +#define MEM_REGION_PVR2VDMA1 "PVR2 VRAM DMA 1"
14.75 +#define MEM_REGION_PVR2VDMA2 "PVR2 VRAM DMA 2"
14.76
14.77 -void *mem_create_ram_region( uint32_t base, uint32_t size, const char *name );
14.78 -void *mem_create_repeating_ram_region( uint32_t base, uint32_t size, const char *name,
14.79 - uint32_t repeat_offset, uint32_t last_repeat );
14.80 +typedef gboolean (*mem_page_remapped_hook_t)(sh4addr_t page, mem_region_fn_t newfn, void *user_data);
14.81 +DECLARE_HOOK( mem_page_remapped_hook, mem_page_remapped_hook_t );
14.82 +
14.83 +struct mem_region *mem_map_region( void *mem, uint32_t base, uint32_t size,
14.84 + const char *name, mem_region_fn_t fn, int flags, uint32_t repeat_offset,
14.85 + uint32_t repeat_until );
14.86 +
14.87 /**
14.88 * Load a ROM image from the specified filename. If the memory region has not
14.89 * been allocated, it is created now, otherwise the existing region is reused.
14.90 * If the CRC check fails, a warning will be printed.
14.91 * @return TRUE if the image was loaded successfully (irrespective of CRC failure).
14.92 */
14.93 -gboolean mem_load_rom( const gchar *filename, uint32_t base, uint32_t size,
14.94 - uint32_t crc, const gchar *region_name );
14.95 +gboolean mem_load_rom( void *output, const gchar *filename, uint32_t size, uint32_t crc );
14.96 void *mem_alloc_pages( int n );
14.97 sh4ptr_t mem_get_region( uint32_t addr );
14.98 sh4ptr_t mem_get_region_by_name( const char *name );
14.99 -int mem_has_page( uint32_t addr );
14.100 -sh4ptr_t mem_get_page( uint32_t addr );
14.101 +gboolean mem_has_page( uint32_t addr );
14.102 int mem_load_block( const gchar *filename, uint32_t base, uint32_t size );
14.103 int mem_save_block( const gchar *filename, uint32_t base, uint32_t size );
14.104 void mem_set_trace( const gchar *tracelist, int flag );
14.105 @@ -101,7 +149,20 @@
14.106 void mem_delete_watch( watch_point_t watch );
14.107 watch_point_t mem_is_watched( uint32_t addr, int size, int op );
14.108
14.109 -extern sh4ptr_t *page_map;
14.110 +extern mem_region_fn_t *ext_address_space;
14.111 +
14.112 +#define SIGNEXT4(n) ((((int32_t)(n))<<28)>>28)
14.113 +#define SIGNEXT8(n) ((int32_t)((int8_t)(n)))
14.114 +#define SIGNEXT12(n) ((((int32_t)(n))<<20)>>20)
14.115 +#define SIGNEXT16(n) ((int32_t)((int16_t)(n)))
14.116 +#define SIGNEXT32(n) ((int64_t)((int32_t)(n)))
14.117 +#define SIGNEXT48(n) ((((int64_t)(n))<<16)>>16)
14.118 +#define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
14.119 +
14.120 +/* Ensure the given region allows all of read/write/execute. If not
14.121 + * page-aligned, some surrounding regions will similarly be unprotected.
14.122 + */
14.123 +void mem_unprotect( void *ptr, uint32_t size );
14.124
14.125 #ifdef __cplusplus
14.126 }
15.1 --- a/src/mmio.h Mon Dec 15 10:44:56 2008 +0000
15.2 +++ b/src/mmio.h Tue Jan 13 11:56:28 2009 +0000
15.3 @@ -16,8 +16,8 @@
15.4 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15.5 * GNU General Public License for more details.
15.6 */
15.7 -#ifndef dream_mmio_H
15.8 -#define dream_mmio_H 1
15.9 +#ifndef lxdream_mmio_H
15.10 +#define lxdream_mmio_H 1
15.11
15.12 #ifdef __cplusplus
15.13 extern "C" {
15.14 @@ -28,6 +28,7 @@
15.15
15.16 #include <stdint.h>
15.17 #include <stdlib.h>
15.18 +#include "mem.h"
15.19
15.20 #define LXDREAM_PAGE_TABLE_ENTRIES 128*1024
15.21 #define LXDREAM_PAGE_SIZE 4096
15.22 @@ -45,8 +46,7 @@
15.23 struct mmio_region {
15.24 char *id, *desc;
15.25 uint32_t base;
15.26 - int32_t (*io_read)(uint32_t addr);
15.27 - void (*io_write)(uint32_t addr, uint32_t val);
15.28 + struct mem_region_fn fn;
15.29 char *mem;
15.30 char *save_mem; /* Used to compare for gui updates */
15.31 struct mmio_port {
15.32 @@ -112,7 +112,7 @@
15.33 #undef MMIO_REGION_LIST_BEGIN
15.34 #undef MMIO_REGION
15.35 #undef MMIO_REGION_LIST_END
15.36 -#define MMIO_REGION_BEGIN(b,id,d) struct mmio_region mmio_region_##id = { #id, d, b, mmio_region_##id##_read, mmio_region_##id##_write, 0, 0, {
15.37 +#define MMIO_REGION_BEGIN(b,id,d) struct mmio_region mmio_region_##id = { #id, d, b, {mmio_region_##id##_read, mmio_region_##id##_write,mmio_region_##id##_read, mmio_region_##id##_write,mmio_region_##id##_read, mmio_region_##id##_write,NULL, NULL, unmapped_prefetch}, 0, 0, {
15.38 #define LONG_PORT( o,id,f,def,d ) { #id, d, 32, o, def, f },
15.39 #define WORD_PORT( o,id,f,def,d ) { #id, d, 16, o, def, f },
15.40 #define BYTE_PORT( o,id,f,def,d ) { #id, d, 8, o, def, f },
15.41 @@ -125,14 +125,16 @@
15.42 * actually need any direct code on read and/or write
15.43 */
15.44 #define MMIO_REGION_READ_STUBFN( id ) \
15.45 -int32_t mmio_region_##id##_read( uint32_t reg ) { \
15.46 +int32_t FASTCALL mmio_region_##id##_read( uint32_t reg ) { \
15.47 + reg = reg & 0xFFF; \
15.48 int32_t val = MMIO_READ( id, reg ); \
15.49 WARN( "Read from unimplemented module %s (%03X => %08X) [%s: %s]",\
15.50 #id, reg, val, MMIO_REGID(id,reg), MMIO_REGDESC(id,reg) ); \
15.51 return val; \
15.52 }
15.53 #define MMIO_REGION_WRITE_STUBFN( id ) \
15.54 -void mmio_region_##id##_write( uint32_t reg, uint32_t val ) { \
15.55 +void FASTCALL mmio_region_##id##_write( uint32_t reg, uint32_t val ) { \
15.56 + reg = reg & 0xFFF; \
15.57 WARN( "Write to unimplemented module %s (%03X <= %08X) [%s: %s]", \
15.58 #id, reg, val, MMIO_REGID(id,reg), MMIO_REGDESC(id,reg) ); \
15.59 MMIO_WRITE( id, reg, val ); \
15.60 @@ -141,32 +143,26 @@
15.61 MMIO_REGION_READ_STUBFN( id ) \
15.62 MMIO_REGION_WRITE_STUBFN( id )
15.63 #define MMIO_REGION_READ_DEFFN( id ) \
15.64 -int32_t mmio_region_##id##_read( uint32_t reg ) { \
15.65 - return MMIO_READ( id, reg ); \
15.66 +int32_t FASTCALL mmio_region_##id##_read( uint32_t reg ) { \
15.67 + return MMIO_READ( id, reg&0xFFF ); \
15.68 }
15.69 #define MMIO_REGION_WRITE_DEFFN( id ) \
15.70 -void mmio_region_##id##_write( uint32_t reg, uint32_t val ) { \
15.71 - MMIO_WRITE( id, reg, val ); \
15.72 +void FASTCALL mmio_region_##id##_write( uint32_t reg, uint32_t val ) { \
15.73 + MMIO_WRITE( id, reg&0xFFF, val ); \
15.74 }
15.75 #define MMIO_REGION_DEFFNS( id ) \
15.76 MMIO_REGION_READ_DEFFN( id ) \
15.77 MMIO_REGION_WRITE_DEFFN( id )
15.78 #endif
15.79
15.80 -#define MMIO_REGION_WRITE_FN( id, reg, val ) \
15.81 -void mmio_region_##id##_write( uint32_t reg, uint32_t val )
15.82 -
15.83 -#define MMIO_REGION_READ_FN( id, reg ) \
15.84 -int32_t mmio_region_##id##_read( uint32_t reg )
15.85 -
15.86 #else
15.87
15.88 #ifndef MMIO_IFACE_INCLUDED
15.89 #define MMIO_IFACE_INCLUDED
15.90 #define MMIO_REGION_BEGIN(b,id,d) \
15.91 extern struct mmio_region mmio_region_##id; \
15.92 -int32_t mmio_region_##id##_read(uint32_t); \
15.93 -void mmio_region_##id##_write(uint32_t, uint32_t); \
15.94 +int32_t FASTCALL mmio_region_##id##_read(uint32_t); \
15.95 +void FASTCALL mmio_region_##id##_write(uint32_t, uint32_t); \
15.96 enum mmio_region_##id##_port_t {
15.97 #define LONG_PORT( o,id,f,def,d ) id = o,
15.98 #define WORD_PORT( o,id,f,def,d ) id = o,
15.99 @@ -177,5 +173,12 @@
15.100 #define MMIO_REGION_LIST_END
15.101 #endif
15.102
15.103 +#define MMIO_REGION_WRITE_FN( id, reg, val ) \
15.104 +void FASTCALL mmio_region_##id##_write( uint32_t reg, uint32_t val )
15.105 +
15.106 +#define MMIO_REGION_READ_FN( id, reg ) \
15.107 +int32_t FASTCALL mmio_region_##id##_read( uint32_t reg )
15.108 +
15.109 +
15.110 #endif
15.111
16.1 --- a/src/pvr2/glrender.c Mon Dec 15 10:44:56 2008 +0000
16.2 +++ b/src/pvr2/glrender.c Tue Jan 13 11:56:28 2009 +0000
16.3 @@ -24,7 +24,7 @@
16.4 #include "pvr2/scene.h"
16.5 #include "pvr2/glutil.h"
16.6
16.7 -#define IS_EMPTY_TILE_LIST(p) ((*((uint32_t *)(video_base+(p))) >> 28) == 0x0F)
16.8 +#define IS_EMPTY_TILE_LIST(p) ((*((uint32_t *)(pvr2_main_ram+(p))) >> 28) == 0x0F)
16.9
16.10 int pvr2_poly_depthmode[8] = { GL_NEVER, GL_LESS, GL_EQUAL, GL_LEQUAL,
16.11 GL_GREATER, GL_NOTEQUAL, GL_GEQUAL,
16.12 @@ -274,7 +274,7 @@
16.13
16.14 void gl_render_tilelist( pvraddr_t tile_entry, GLint depth_mode )
16.15 {
16.16 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
16.17 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
16.18 int strip_count;
16.19 struct polygon_struct *poly;
16.20
16.21 @@ -287,7 +287,7 @@
16.22 case 0x0F:
16.23 return; // End-of-list
16.24 case 0x0E:
16.25 - tile_list = (uint32_t *)(video_base + (entry&0x007FFFFF));
16.26 + tile_list = (uint32_t *)(pvr2_main_ram + (entry&0x007FFFFF));
16.27 break;
16.28 case 0x08: case 0x09: case 0x0A: case 0x0B:
16.29 strip_count = ((entry >> 25) & 0x0F)+1;
16.30 @@ -313,7 +313,7 @@
16.31 */
16.32 void gl_render_tilelist_depthonly( pvraddr_t tile_entry )
16.33 {
16.34 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
16.35 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
16.36 int strip_count;
16.37 struct polygon_struct *poly;
16.38
16.39 @@ -330,7 +330,7 @@
16.40 case 0x0F:
16.41 return; // End-of-list
16.42 case 0x0E:
16.43 - tile_list = (uint32_t *)(video_base + (entry&0x007FFFFF));
16.44 + tile_list = (uint32_t *)(pvr2_main_ram + (entry&0x007FFFFF));
16.45 break;
16.46 case 0x08: case 0x09: case 0x0A: case 0x0B:
16.47 strip_count = ((entry >> 25) & 0x0F)+1;
16.48 @@ -428,7 +428,7 @@
16.49
16.50 void gl_render_modifier_tilelist( pvraddr_t tile_entry, uint32_t tile_bounds[] )
16.51 {
16.52 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
16.53 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
16.54 int strip_count;
16.55 struct polygon_struct *poly;
16.56
16.57 @@ -456,7 +456,7 @@
16.58 glStencilOp( GL_KEEP, GL_KEEP, GL_KEEP );
16.59 return; // End-of-list
16.60 case 0x0E:
16.61 - tile_list = (uint32_t *)(video_base + (entry&0x007FFFFF));
16.62 + tile_list = (uint32_t *)(pvr2_main_ram + (entry&0x007FFFFF));
16.63 break;
16.64 case 0x08: case 0x09: case 0x0A: case 0x0B:
16.65 strip_count = ((entry >> 25) & 0x0F)+1;
17.1 --- a/src/pvr2/pvr2.c Mon Dec 15 10:44:56 2008 +0000
17.2 +++ b/src/pvr2/pvr2.c Tue Jan 13 11:56:28 2009 +0000
17.3 @@ -31,8 +31,6 @@
17.4 #define MMIO_IMPL
17.5 #include "pvr2/pvr2mmio.h"
17.6
17.7 -unsigned char *video_base;
17.8 -
17.9 #define MAX_RENDER_BUFFERS 4
17.10
17.11 #define HPOS_PER_FRAME 0
17.12 @@ -142,12 +140,10 @@
17.13 int i;
17.14 register_io_region( &mmio_region_PVR2 );
17.15 register_io_region( &mmio_region_PVR2PAL );
17.16 - register_io_region( &mmio_region_PVR2TA );
17.17 register_event_callback( EVENT_HPOS, pvr2_hpos_callback );
17.18 register_event_callback( EVENT_SCANLINE1, pvr2_scanline_callback );
17.19 register_event_callback( EVENT_SCANLINE2, pvr2_scanline_callback );
17.20 register_event_callback( EVENT_GUNPOS, pvr2_gunpos_callback );
17.21 - video_base = mem_get_region_by_name( MEM_REGION_VIDEO );
17.22 texcache_init();
17.23 pvr2_reset();
17.24 pvr2_ta_reset();
17.25 @@ -445,7 +441,7 @@
17.26 }
17.27 fbuf.address = (fbuf.address & 0x00FFFFFF) + PVR2_RAM_BASE;
17.28 fbuf.inverted = FALSE;
17.29 - fbuf.data = video_base + (fbuf.address&0x00FFFFFF);
17.30 + fbuf.data = pvr2_main_ram + (fbuf.address&0x00FFFFFF);
17.31
17.32 render_buffer_t rbuf = pvr2_get_render_buffer( &fbuf );
17.33 if( rbuf == NULL ) {
17.34 @@ -462,8 +458,9 @@
17.35 * This has to handle every single register individually as they all get masked
17.36 * off differently (and its easier to do it at write time)
17.37 */
17.38 -void mmio_region_PVR2_write( uint32_t reg, uint32_t val )
17.39 +MMIO_REGION_WRITE_FN( PVR2, reg, val )
17.40 {
17.41 + reg &= 0xFFF;
17.42 if( reg >= 0x200 && reg < 0x600 ) { /* Fog table */
17.43 MMIO_WRITE( PVR2, reg, val );
17.44 return;
17.45 @@ -826,6 +823,7 @@
17.46
17.47 MMIO_REGION_READ_FN( PVR2, reg )
17.48 {
17.49 + reg &= 0xFFF;
17.50 switch( reg ) {
17.51 case DISP_SYNCSTAT:
17.52 return pvr2_get_sync_status();
17.53 @@ -836,6 +834,7 @@
17.54
17.55 MMIO_REGION_WRITE_FN( PVR2PAL, reg, val )
17.56 {
17.57 + reg &= 0xFFF;
17.58 MMIO_WRITE( PVR2PAL, reg, val );
17.59 pvr2_state.palette_changed = TRUE;
17.60 }
17.61 @@ -855,19 +854,6 @@
17.62 mmio_region_PVR2_write( DISP_ADDR1, base );
17.63 }
17.64
17.65 -
17.66 -
17.67 -
17.68 -int32_t mmio_region_PVR2TA_read( uint32_t reg )
17.69 -{
17.70 - return 0xFFFFFFFF;
17.71 -}
17.72 -
17.73 -void mmio_region_PVR2TA_write( uint32_t reg, uint32_t val )
17.74 -{
17.75 - pvr2_ta_write( (unsigned char *)&val, sizeof(uint32_t) );
17.76 -}
17.77 -
17.78 render_buffer_t pvr2_create_render_buffer( sh4addr_t addr, int width, int height, GLuint tex_id )
17.79 {
17.80 if( display_driver != NULL && display_driver->create_render_buffer != NULL ) {
18.1 --- a/src/pvr2/pvr2.h Mon Dec 15 10:44:56 2008 +0000
18.2 +++ b/src/pvr2/pvr2.h Tue Jan 13 11:56:28 2009 +0000
18.3 @@ -134,6 +134,8 @@
18.4
18.5 /****************************** Frame Buffer *****************************/
18.6
18.7 +extern unsigned char pvr2_main_ram[];
18.8 +
18.9 /**
18.10 * Write a block of data to an address in the DMA range (0x10000000 -
18.11 * 0x13FFFFFF), ie TA, YUV, or texture ram.
18.12 @@ -219,6 +221,8 @@
18.13 */
18.14 void pvr2_ta_write( unsigned char *buf, uint32_t length );
18.15
18.16 +void FASTCALL pvr2_ta_write_burst( sh4addr_t addr, unsigned char *buf );
18.17 +
18.18 /**
18.19 * Find the first polygon or sprite context in the supplied buffer of TA
18.20 * data.
19.1 --- a/src/pvr2/pvr2mem.c Mon Dec 15 10:44:56 2008 +0000
19.2 +++ b/src/pvr2/pvr2mem.c Tue Jan 13 11:56:28 2009 +0000
19.3 @@ -18,11 +18,175 @@
19.4 #include <string.h>
19.5 #include <stdio.h>
19.6 #include <errno.h>
19.7 +#include "sh4/sh4core.h"
19.8 #include "pvr2.h"
19.9 #include "asic.h"
19.10 #include "dream.h"
19.11
19.12 -extern unsigned char *video_base;
19.13 +unsigned char pvr2_main_ram[8 MB];
19.14 +
19.15 +/************************* VRAM32 address space ***************************/
19.16 +
19.17 +static int32_t FASTCALL pvr2_vram32_read_long( sh4addr_t addr )
19.18 +{
19.19 + pvr2_render_buffer_invalidate(addr, FALSE);
19.20 + return *((int32_t *)(pvr2_main_ram+(addr&0x007FFFFF)));
19.21 +}
19.22 +static int32_t FASTCALL pvr2_vram32_read_word( sh4addr_t addr )
19.23 +{
19.24 + pvr2_render_buffer_invalidate(addr, FALSE);
19.25 + return SIGNEXT16(*((int16_t *)(pvr2_main_ram+(addr&0x007FFFFF))));
19.26 +}
19.27 +static int32_t FASTCALL pvr2_vram32_read_byte( sh4addr_t addr )
19.28 +{
19.29 + pvr2_render_buffer_invalidate(addr, FALSE);
19.30 + return SIGNEXT8(*((int8_t *)(pvr2_main_ram+(addr&0x007FFFFF))));
19.31 +}
19.32 +static void FASTCALL pvr2_vram32_write_long( sh4addr_t addr, uint32_t val )
19.33 +{
19.34 + pvr2_render_buffer_invalidate(addr, TRUE);
19.35 + *(uint32_t *)(pvr2_main_ram + (addr&0x007FFFFF)) = val;
19.36 +}
19.37 +static void FASTCALL pvr2_vram32_write_word( sh4addr_t addr, uint32_t val )
19.38 +{
19.39 + pvr2_render_buffer_invalidate(addr, TRUE);
19.40 + *(uint16_t *)(pvr2_main_ram + (addr&0x007FFFFF)) = (uint16_t)val;
19.41 +}
19.42 +static void FASTCALL pvr2_vram32_write_byte( sh4addr_t addr, uint32_t val )
19.43 +{
19.44 + pvr2_render_buffer_invalidate(addr, TRUE);
19.45 + *(uint8_t *)(pvr2_main_ram + (addr&0x007FFFFF)) = (uint8_t)val;
19.46 +}
19.47 +static void FASTCALL pvr2_vram32_read_burst( unsigned char *dest, sh4addr_t addr )
19.48 +{
19.49 + // Render buffers pretty much have to be (at least) 32-byte aligned
19.50 + pvr2_render_buffer_invalidate(addr, FALSE);
19.51 + memcpy( dest, (pvr2_main_ram + (addr&0x007FFFFF)), 32 );
19.52 +}
19.53 +static void FASTCALL pvr2_vram32_write_burst( sh4addr_t addr, unsigned char *src )
19.54 +{
19.55 + // Render buffers pretty much have to be (at least) 32-byte aligned
19.56 + pvr2_render_buffer_invalidate(addr, TRUE);
19.57 + memcpy( (pvr2_main_ram + (addr&0x007FFFFF)), src, 32 );
19.58 +}
19.59 +
19.60 +struct mem_region_fn mem_region_vram32 = { pvr2_vram32_read_long, pvr2_vram32_write_long,
19.61 + pvr2_vram32_read_word, pvr2_vram32_write_word,
19.62 + pvr2_vram32_read_byte, pvr2_vram32_write_byte,
19.63 + pvr2_vram32_read_burst, pvr2_vram32_write_burst };
19.64 +
19.65 +/************************* VRAM64 address space ***************************/
19.66 +
19.67 +#define TRANSLATE_VIDEO_64BIT_ADDRESS(a) ( (((a)&0x00FFFFF8)>>1)|(((a)&0x00000004)<<20)|((a)&0x03) )
19.68 +
19.69 +static int32_t FASTCALL pvr2_vram64_read_long( sh4addr_t addr )
19.70 +{
19.71 + addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
19.72 + pvr2_render_buffer_invalidate(addr, FALSE);
19.73 + return *((int32_t *)(pvr2_main_ram+(addr&0x007FFFFF)));
19.74 +}
19.75 +static int32_t FASTCALL pvr2_vram64_read_word( sh4addr_t addr )
19.76 +{
19.77 + addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
19.78 + pvr2_render_buffer_invalidate(addr, FALSE);
19.79 + return SIGNEXT16(*((int16_t *)(pvr2_main_ram+(addr&0x007FFFFF))));
19.80 +}
19.81 +static int32_t FASTCALL pvr2_vram64_read_byte( sh4addr_t addr )
19.82 +{
19.83 + addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
19.84 + pvr2_render_buffer_invalidate(addr, FALSE);
19.85 + return SIGNEXT8(*((int8_t *)(pvr2_main_ram+(addr&0x007FFFFF))));
19.86 +}
19.87 +static void FASTCALL pvr2_vram64_write_long( sh4addr_t addr, uint32_t val )
19.88 +{
19.89 + texcache_invalidate_page(addr& 0x007FFFFF);
19.90 + addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
19.91 + pvr2_render_buffer_invalidate(addr, TRUE);
19.92 + *(uint32_t *)(pvr2_main_ram + (addr&0x007FFFFF)) = val;
19.93 +}
19.94 +static void FASTCALL pvr2_vram64_write_word( sh4addr_t addr, uint32_t val )
19.95 +{
19.96 + texcache_invalidate_page(addr& 0x007FFFFF);
19.97 + addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
19.98 + pvr2_render_buffer_invalidate(addr, TRUE);
19.99 + *(uint16_t *)(pvr2_main_ram + (addr&0x007FFFFF)) = (uint16_t)val;
19.100 +}
19.101 +static void FASTCALL pvr2_vram64_write_byte( sh4addr_t addr, uint32_t val )
19.102 +{
19.103 + texcache_invalidate_page(addr& 0x007FFFFF);
19.104 + addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
19.105 + pvr2_render_buffer_invalidate(addr, TRUE);
19.106 + *(uint8_t *)(pvr2_main_ram + (addr&0x007FFFFF)) = (uint8_t)val;
19.107 +}
19.108 +static void FASTCALL pvr2_vram64_read_burst( unsigned char *dest, sh4addr_t addr )
19.109 +{
19.110 + pvr2_vram64_read( dest, addr, 32 );
19.111 +}
19.112 +static void FASTCALL pvr2_vram64_write_burst( sh4addr_t addr, unsigned char *src )
19.113 +{
19.114 + pvr2_vram64_write( addr, src, 32 );
19.115 +}
19.116 +
19.117 +struct mem_region_fn mem_region_vram64 = { pvr2_vram64_read_long, pvr2_vram64_write_long,
19.118 + pvr2_vram64_read_word, pvr2_vram64_write_word,
19.119 + pvr2_vram64_read_byte, pvr2_vram64_write_byte,
19.120 + pvr2_vram64_read_burst, pvr2_vram64_write_burst };
19.121 +
19.122 +/******************************* Burst areas ******************************/
19.123 +
19.124 +static void FASTCALL pvr2_vramdma1_write_burst( sh4addr_t destaddr, unsigned char *src )
19.125 +{
19.126 + int region = MMIO_READ( ASIC, PVRDMARGN1 );
19.127 + if( region == 0 ) {
19.128 + pvr2_vram64_write( destaddr, src, 32 );
19.129 + } else {
19.130 + destaddr &= PVR2_RAM_MASK;
19.131 + unsigned char *dest = pvr2_main_ram + destaddr;
19.132 + memcpy( dest, src, 32 );
19.133 + }
19.134 +}
19.135 +
19.136 +static void FASTCALL pvr2_vramdma2_write_burst( sh4addr_t destaddr, unsigned char *src )
19.137 +{
19.138 + int region = MMIO_READ( ASIC, PVRDMARGN2 );
19.139 + if( region == 0 ) {
19.140 + pvr2_vram64_write( destaddr, src, 32 );
19.141 + } else {
19.142 + destaddr &= PVR2_RAM_MASK;
19.143 + unsigned char *dest = pvr2_main_ram + destaddr;
19.144 + memcpy( dest, src, 32 );
19.145 + }
19.146 +}
19.147 +
19.148 +static void FASTCALL pvr2_yuv_write_burst( sh4addr_t destaddr, unsigned char *src )
19.149 +{
19.150 + pvr2_yuv_write( src, 32 );
19.151 +}
19.152 +
19.153 +struct mem_region_fn mem_region_pvr2ta = {
19.154 + unmapped_read_long, unmapped_write_long,
19.155 + unmapped_read_long, unmapped_write_long,
19.156 + unmapped_read_long, unmapped_write_long,
19.157 + unmapped_read_burst, pvr2_ta_write_burst };
19.158 +
19.159 +struct mem_region_fn mem_region_pvr2yuv = {
19.160 + unmapped_read_long, unmapped_write_long,
19.161 + unmapped_read_long, unmapped_write_long,
19.162 + unmapped_read_long, unmapped_write_long,
19.163 + unmapped_read_burst, pvr2_yuv_write_burst };
19.164 +
19.165 +struct mem_region_fn mem_region_pvr2vdma1 = {
19.166 + unmapped_read_long, unmapped_write_long,
19.167 + unmapped_read_long, unmapped_write_long,
19.168 + unmapped_read_long, unmapped_write_long,
19.169 + unmapped_read_burst, pvr2_vramdma1_write_burst };
19.170 +
19.171 +struct mem_region_fn mem_region_pvr2vdma2 = {
19.172 + unmapped_read_long, unmapped_write_long,
19.173 + unmapped_read_long, unmapped_write_long,
19.174 + unmapped_read_long, unmapped_write_long,
19.175 + unmapped_read_burst, pvr2_vramdma2_write_burst };
19.176 +
19.177
19.178 void pvr2_dma_write( sh4addr_t destaddr, unsigned char *src, uint32_t count )
19.179 {
19.180 @@ -40,7 +204,7 @@
19.181 pvr2_vram64_write( destaddr, src, count );
19.182 } else {
19.183 destaddr &= PVR2_RAM_MASK;
19.184 - unsigned char *dest = video_base + destaddr;
19.185 + unsigned char *dest = pvr2_main_ram + destaddr;
19.186 if( PVR2_RAM_SIZE - destaddr < count ) {
19.187 count = PVR2_RAM_SIZE - destaddr;
19.188 }
19.189 @@ -58,7 +222,7 @@
19.190 pvr2_vram64_write( destaddr, src, count );
19.191 } else {
19.192 destaddr &= PVR2_RAM_MASK;
19.193 - unsigned char *dest = video_base + destaddr;
19.194 + unsigned char *dest = pvr2_main_ram + destaddr;
19.195 if( PVR2_RAM_SIZE - destaddr < count ) {
19.196 count = PVR2_RAM_SIZE - destaddr;
19.197 }
19.198 @@ -83,7 +247,7 @@
19.199 texcache_invalidate_page( i );
19.200 }
19.201
19.202 - banks[0] = ((uint32_t *)(video_base + ((destaddr & 0x007FFFF8) >>1)));
19.203 + banks[0] = ((uint32_t *)(pvr2_main_ram + ((destaddr & 0x007FFFF8) >>1)));
19.204 banks[1] = banks[0] + 0x100000;
19.205 if( bank_flag )
19.206 banks[0]++;
19.207 @@ -134,7 +298,7 @@
19.208 texcache_invalidate_page( i );
19.209 }
19.210
19.211 - banks[0] = (uint32_t *)(video_base + (destaddr >>1));
19.212 + banks[0] = (uint32_t *)(pvr2_main_ram + (destaddr >>1));
19.213 banks[1] = banks[0] + 0x100000;
19.214
19.215 for( i=0; i<line_count; i++ ) {
19.216 @@ -178,7 +342,7 @@
19.217 line_bytes = dest_line_bytes >> 2;
19.218 }
19.219
19.220 - banks[0] = (uint32_t *)(video_base + (srcaddr>>1));
19.221 + banks[0] = (uint32_t *)(pvr2_main_ram + (srcaddr>>1));
19.222 banks[1] = banks[0] + 0x100000;
19.223 if( bank_flag )
19.224 banks[0]++;
19.225 @@ -309,7 +473,7 @@
19.226
19.227 srcaddr = srcaddr & 0x7FFFF8;
19.228
19.229 - banks[0] = (uint8_t *)(video_base + (srcaddr>>1));
19.230 + banks[0] = (uint8_t *)(pvr2_main_ram + (srcaddr>>1));
19.231 banks[1] = banks[0] + 0x400000;
19.232 if( offset_flag & 0x04 ) { // If source is not 64-bit aligned, swap the banks
19.233 uint8_t *tmp = banks[0];
19.234 @@ -351,7 +515,7 @@
19.235
19.236 srcaddr = srcaddr & 0x7FFFF8;
19.237
19.238 - banks[0] = (uint8_t *)(video_base + (srcaddr>>1));
19.239 + banks[0] = (uint8_t *)(pvr2_main_ram + (srcaddr>>1));
19.240 banks[1] = banks[0] + 0x400000;
19.241 if( offset_flag & 0x04 ) { // If source is not 64-bit aligned, swap the banks
19.242 uint8_t *tmp = banks[0];
19.243 @@ -392,7 +556,7 @@
19.244
19.245 srcaddr = srcaddr & 0x7FFFF8;
19.246
19.247 - banks[0] = (uint16_t *)(video_base + (srcaddr>>1));
19.248 + banks[0] = (uint16_t *)(pvr2_main_ram + (srcaddr>>1));
19.249 banks[1] = banks[0] + 0x200000;
19.250 if( offset_flag & 0x02 ) { // If source is not 64-bit aligned, swap the banks
19.251 uint16_t *tmp = banks[0];
19.252 @@ -422,7 +586,7 @@
19.253 uint32_t line_size, uint32_t dest_stride,
19.254 uint32_t src_stride )
19.255 {
19.256 - unsigned char *dest = video_base + (destaddr & 0x007FFFFF);
19.257 + unsigned char *dest = pvr2_main_ram + (destaddr & 0x007FFFFF);
19.258 unsigned char *p = src + src_size - src_stride;
19.259 while( p >= src ) {
19.260 memcpy( dest, p, line_size );
19.261 @@ -447,7 +611,7 @@
19.262 texcache_invalidate_page( i );
19.263 }
19.264
19.265 - banks[0] = (uint32_t *)(video_base + (destaddr >>1));
19.266 + banks[0] = (uint32_t *)(pvr2_main_ram + (destaddr >>1));
19.267 banks[1] = banks[0] + 0x100000;
19.268
19.269 while( dwsrc >= (uint32_t *)src ) {
19.270 @@ -469,7 +633,7 @@
19.271 uint32_t line_size, uint32_t dest_stride,
19.272 uint32_t src_stride, int bpp )
19.273 {
19.274 - unsigned char *dest = video_base + (destaddr & 0x007FFFFF);
19.275 + unsigned char *dest = pvr2_main_ram + (destaddr & 0x007FFFFF);
19.276 unsigned char *p = src + src_size - src_stride;
19.277 while( p >= src ) {
19.278 unsigned char *s = p, *d = dest;
19.279 @@ -496,7 +660,7 @@
19.280 if( srcaddr + length > 0x800000 )
19.281 length = 0x800000 - srcaddr;
19.282
19.283 - banks[0] = ((uint32_t *)(video_base + ((srcaddr&0x007FFFF8)>>1)));
19.284 + banks[0] = ((uint32_t *)(pvr2_main_ram + ((srcaddr&0x007FFFF8)>>1)));
19.285 banks[1] = banks[0] + 0x100000;
19.286 if( bank_flag )
19.287 banks[0]++;
20.1 --- a/src/pvr2/pvr2mmio.h Mon Dec 15 10:44:56 2008 +0000
20.2 +++ b/src/pvr2/pvr2mmio.h Tue Jan 13 11:56:28 2009 +0000
20.3 @@ -95,7 +95,3 @@
20.4 MMIO_REGION_BEGIN( 0x005F9000, PVR2PAL, "Power VR/2 CLUT Palettes" )
20.5 LONG_PORT( 0x000, PAL0_0, PORT_MRW, 0, "Pal0 colour 0" )
20.6 MMIO_REGION_END
20.7 -
20.8 -MMIO_REGION_BEGIN( 0x10000000, PVR2TA, "Power VR/2 TA Command port" )
20.9 - LONG_PORT( 0x000, TACMD, PORT_MRW, 0, "TA Command port" )
20.10 -MMIO_REGION_END
21.1 --- a/src/pvr2/rendsave.c Mon Dec 15 10:44:56 2008 +0000
21.2 +++ b/src/pvr2/rendsave.c Tue Jan 13 11:56:28 2009 +0000
21.3 @@ -31,8 +31,6 @@
21.4 #define SAVE_PAGE_SIZE 1024
21.5 #define SAVE_PAGE_COUNT 8192
21.6
21.7 -extern char *video_base;
21.8 -
21.9 /* Determine pages of memory to save. Start walking from the render tilemap
21.10 * data and build up a page list
21.11 */
21.12 @@ -88,7 +86,7 @@
21.13 uint32_t length = (j-i) * SAVE_PAGE_SIZE;
21.14 fwrite( &start, sizeof(uint32_t), 1, f );
21.15 fwrite( &length, sizeof(uint32_t), 1, f );
21.16 - fwrite( video_base + start, 1, length, f );
21.17 + fwrite( pvr2_main_ram + start, 1, length, f );
21.18 i = j-1;
21.19 }
21.20 }
22.1 --- a/src/pvr2/rendsort.c Mon Dec 15 10:44:56 2008 +0000
22.2 +++ b/src/pvr2/rendsort.c Tue Jan 13 11:56:28 2009 +0000
22.3 @@ -41,14 +41,14 @@
22.4 * pvr memory address.
22.5 */
22.6 static int sort_count_triangles( pvraddr_t tile_entry ) {
22.7 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
22.8 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
22.9 int count = 0;
22.10 while(1) {
22.11 uint32_t entry = *tile_list++;
22.12 if( entry >> 28 == 0x0F ) {
22.13 break;
22.14 } else if( entry >> 28 == 0x0E ) {
22.15 - tile_list = (uint32_t *)(video_base+(entry&0x007FFFFF));
22.16 + tile_list = (uint32_t *)(pvr2_main_ram+(entry&0x007FFFFF));
22.17 } else if( entry >> 29 == 0x04 ) { /* Triangle array */
22.18 count += ((entry >> 25) & 0x0F)+1;
22.19 } else if( entry >> 29 == 0x05 ) { /* Quad array */
22.20 @@ -100,7 +100,7 @@
22.21 */
22.22 int sort_extract_triangles( pvraddr_t tile_entry, struct sort_triangle *triangles )
22.23 {
22.24 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
22.25 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
22.26 int strip_count;
22.27 struct polygon_struct *poly;
22.28 int count = 0, i;
22.29 @@ -111,7 +111,7 @@
22.30 case 0x0F:
22.31 return count; // End-of-list
22.32 case 0x0E:
22.33 - tile_list = (uint32_t *)(video_base + (entry&0x007FFFFF));
22.34 + tile_list = (uint32_t *)(pvr2_main_ram + (entry&0x007FFFFF));
22.35 break;
22.36 case 0x08: case 0x09: case 0x0A: case 0x0B:
22.37 strip_count = ((entry >> 25) & 0x0F)+1;
23.1 --- a/src/pvr2/scene.c Mon Dec 15 10:44:56 2008 +0000
23.2 +++ b/src/pvr2/scene.c Tue Jan 13 11:56:28 2009 +0000
23.3 @@ -506,13 +506,13 @@
23.4
23.5 static void scene_extract_polygons( pvraddr_t tile_entry )
23.6 {
23.7 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
23.8 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
23.9 do {
23.10 uint32_t entry = *tile_list++;
23.11 if( entry >> 28 == 0x0F ) {
23.12 break;
23.13 } else if( entry >> 28 == 0x0E ) {
23.14 - tile_list = (uint32_t *)(video_base + (entry&0x007FFFFF));
23.15 + tile_list = (uint32_t *)(pvr2_main_ram + (entry&0x007FFFFF));
23.16 } else {
23.17 pvraddr_t polyaddr = entry&0x000FFFFF;
23.18 shadow_mode_t is_modified = (entry & 0x01000000) ? pvr2_scene.shadow_mode : SHADOW_NONE;
23.19 @@ -571,13 +571,13 @@
23.20
23.21 static void scene_extract_vertexes( pvraddr_t tile_entry )
23.22 {
23.23 - uint32_t *tile_list = (uint32_t *)(video_base+tile_entry);
23.24 + uint32_t *tile_list = (uint32_t *)(pvr2_main_ram+tile_entry);
23.25 do {
23.26 uint32_t entry = *tile_list++;
23.27 if( entry >> 28 == 0x0F ) {
23.28 break;
23.29 } else if( entry >> 28 == 0x0E ) {
23.30 - tile_list = (uint32_t *)(video_base + (entry&0x007FFFFF));
23.31 + tile_list = (uint32_t *)(pvr2_main_ram + (entry&0x007FFFFF));
23.32 } else {
23.33 pvraddr_t polyaddr = entry&0x000FFFFF;
23.34 shadow_mode_t is_modified = (entry & 0x01000000) ? pvr2_scene.shadow_mode : SHADOW_NONE;
23.35 @@ -737,11 +737,11 @@
23.36 fog_col = MMIO_READ( PVR2, RENDER_FOGVRTCOL );
23.37 unpack_bgra( fog_col, pvr2_scene.fog_vert_colour );
23.38
23.39 - uint32_t *tilebuffer = (uint32_t *)(video_base + MMIO_READ( PVR2, RENDER_TILEBASE ));
23.40 + uint32_t *tilebuffer = (uint32_t *)(pvr2_main_ram + MMIO_READ( PVR2, RENDER_TILEBASE ));
23.41 uint32_t *segment = tilebuffer;
23.42 uint32_t shadow = MMIO_READ(PVR2,RENDER_SHADOW);
23.43 pvr2_scene.segment_list = (struct tile_segment *)tilebuffer;
23.44 - pvr2_scene.pvr2_pbuf = (uint32_t *)(video_base + MMIO_READ(PVR2,RENDER_POLYBASE));
23.45 + pvr2_scene.pvr2_pbuf = (uint32_t *)(pvr2_main_ram + MMIO_READ(PVR2,RENDER_POLYBASE));
23.46 pvr2_scene.shadow_mode = shadow & 0x100 ? SHADOW_CHEAP : SHADOW_FULL;
23.47 scene_shadow_intensity = U8TOFLOAT(shadow&0xFF);
23.48
23.49 @@ -814,7 +814,7 @@
23.50 fprintf( f, "Polygons: %d\n", pvr2_scene.poly_count );
23.51 for( i=0; i<pvr2_scene.poly_count; i++ ) {
23.52 struct polygon_struct *poly = &pvr2_scene.poly_array[i];
23.53 - fprintf( f, " %08X ", ((unsigned char *)poly->context) - video_base );
23.54 + fprintf( f, " %08X ", ((unsigned char *)poly->context) - pvr2_main_ram );
23.55 switch( poly->vertex_count ) {
23.56 case 3: fprintf( f, "Tri " ); break;
23.57 case 4: fprintf( f, "Quad " ); break;
24.1 --- a/src/pvr2/tacore.c Mon Dec 15 10:44:56 2008 +0000
24.2 +++ b/src/pvr2/tacore.c Tue Jan 13 11:56:28 2009 +0000
24.3 @@ -119,8 +119,7 @@
24.4
24.5 #define TILESLOT( x, y ) (ta_status.current_tile_matrix + (ta_status.current_tile_size * (y * ta_status.width+ x) << 2))
24.6
24.7 -extern char *video_base;
24.8 -#define PVRRAM(addr) (*(uint32_t *)(video_base + ((addr)&PVR2_RAM_MASK)))
24.9 +#define PVRRAM(addr) (*(uint32_t *)(pvr2_main_ram + ((addr)&PVR2_RAM_MASK)))
24.10
24.11 struct pvr2_ta_vertex {
24.12 float x,y,z;
24.13 @@ -296,7 +295,7 @@
24.14
24.15 /* Initialize each tile to 0xF0000000 */
24.16 if( ta_status.current_tile_size != 0 ) {
24.17 - p = (uint32_t *)(video_base + ta_status.current_tile_matrix);
24.18 + p = (uint32_t *)(pvr2_main_ram + ta_status.current_tile_matrix);
24.19 for( i=0; i< ta_status.width * ta_status.height; i++ ) {
24.20 *p = 0xF0000000;
24.21 p += ta_status.current_tile_size;
24.22 @@ -346,7 +345,7 @@
24.23 int rv;
24.24 int posn = MMIO_READ( PVR2, TA_POLYPOS );
24.25 int end = MMIO_READ( PVR2, TA_POLYEND );
24.26 - uint32_t *target = (uint32_t *)(video_base + posn);
24.27 + uint32_t *target = (uint32_t *)(pvr2_main_ram + posn);
24.28 for( rv=0; rv < length; rv++ ) {
24.29 if( posn == end ) {
24.30 asic_event( EVENT_PVR_PRIM_ALLOC_FAIL );
24.31 @@ -1200,3 +1199,11 @@
24.32 buf += 32;
24.33 }
24.34 }
24.35 +
24.36 +void FASTCALL pvr2_ta_write_burst( sh4addr_t addr, unsigned char *data )
24.37 +{
24.38 + if( ta_status.debug_output ) {
24.39 + fwrite_dump32( (uint32_t *)data, 32, stderr );
24.40 + }
24.41 + pvr2_ta_process_block( data );
24.42 +}
25.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
25.2 +++ b/src/sdram.c Tue Jan 13 11:56:28 2009 +0000
25.3 @@ -0,0 +1,65 @@
25.4 +/**
25.5 + * $Id$
25.6 + *
25.7 + * Dreamcast main SDRAM - access methods and timing controls. This is fairly
25.8 + * directly coupled to the SH4
25.9 + *
25.10 + * Copyright (c) 2005 Nathan Keynes.
25.11 + *
25.12 + * This program is free software; you can redistribute it and/or modify
25.13 + * it under the terms of the GNU General Public License as published by
25.14 + * the Free Software Foundation; either version 2 of the License, or
25.15 + * (at your option) any later version.
25.16 + *
25.17 + * This program is distributed in the hope that it will be useful,
25.18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
25.19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25.20 + * GNU General Public License for more details.
25.21 + */
25.22 +
25.23 +#include "lxdream.h"
25.24 +#include "mem.h"
25.25 +#include "dreamcast.h"
25.26 +#include <string.h>
25.27 +
25.28 +
25.29 +static int32_t FASTCALL ext_sdram_read_long( sh4addr_t addr )
25.30 +{
25.31 + return *((int32_t *)(dc_main_ram + (addr&0x00FFFFFF)));
25.32 +}
25.33 +static int32_t FASTCALL ext_sdram_read_word( sh4addr_t addr )
25.34 +{
25.35 + return SIGNEXT16(*((int16_t *)(dc_main_ram + (addr&0x00FFFFFF))));
25.36 +}
25.37 +static int32_t FASTCALL ext_sdram_read_byte( sh4addr_t addr )
25.38 +{
25.39 +    return SIGNEXT8(*((int8_t *)(dc_main_ram + (addr&0x00FFFFFF))));
25.40 +}
25.41 +static void FASTCALL ext_sdram_write_long( sh4addr_t addr, uint32_t val )
25.42 +{
25.43 + *(uint32_t *)(dc_main_ram + (addr&0x00FFFFFF)) = val;
25.44 + xlat_invalidate_long(addr);
25.45 +}
25.46 +static void FASTCALL ext_sdram_write_word( sh4addr_t addr, uint32_t val )
25.47 +{
25.48 + *(uint16_t *)(dc_main_ram + (addr&0x00FFFFFF)) = (uint16_t)val;
25.49 + xlat_invalidate_word(addr);
25.50 +}
25.51 +static void FASTCALL ext_sdram_write_byte( sh4addr_t addr, uint32_t val )
25.52 +{
25.53 + *(uint8_t *)(dc_main_ram + (addr&0x00FFFFFF)) = (uint8_t)val;
25.54 + xlat_invalidate_word(addr);
25.55 +}
25.56 +static void FASTCALL ext_sdram_read_burst( unsigned char *dest, sh4addr_t addr )
25.57 +{
25.58 + memcpy( dest, dc_main_ram+(addr&0x00FFFFFF), 32 );
25.59 +}
25.60 +static void FASTCALL ext_sdram_write_burst( sh4addr_t addr, unsigned char *src )
25.61 +{
25.62 + memcpy( dc_main_ram+(addr&0x00FFFFFF), src, 32 );
25.63 +}
25.64 +
25.65 +struct mem_region_fn mem_region_sdram = { ext_sdram_read_long, ext_sdram_write_long,
25.66 + ext_sdram_read_word, ext_sdram_write_word,
25.67 + ext_sdram_read_byte, ext_sdram_write_byte,
25.68 + ext_sdram_read_burst, ext_sdram_write_burst };
26.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
26.2 +++ b/src/sh4/cache.c Tue Jan 13 11:56:28 2009 +0000
26.3 @@ -0,0 +1,360 @@
26.4 +/**
26.5 + * $Id$
26.6 + * Implements the on-chip operand cache, instruction cache, and store queue.
26.7 + *
26.8 + * Copyright (c) 2008 Nathan Keynes.
26.9 + *
26.10 + * This program is free software; you can redistribute it and/or modify
26.11 + * it under the terms of the GNU General Public License as published by
26.12 + * the Free Software Foundation; either version 2 of the License, or
26.13 + * (at your option) any later version.
26.14 + *
26.15 + * This program is distributed in the hope that it will be useful,
26.16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
26.17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26.18 + * GNU General Public License for more details.
26.19 + */
26.20 +
26.21 +#define MODULE sh4_module
26.22 +
26.23 +#include <string.h>
26.24 +#include "dream.h"
26.25 +#include "mem.h"
26.26 +#include "mmio.h"
26.27 +#include "sh4/sh4core.h"
26.28 +#include "sh4/sh4mmio.h"
26.29 +#include "sh4/xltcache.h"
26.30 +#include "sh4/mmu.h"
26.31 +
26.32 +#define OCRAM_START (0x7C000000>>LXDREAM_PAGE_BITS)
26.33 +#define OCRAM_MID (0x7E000000>>LXDREAM_PAGE_BITS)
26.34 +#define OCRAM_END (0x80000000>>LXDREAM_PAGE_BITS)
26.35 +
26.36 +#define CACHE_VALID 1
26.37 +#define CACHE_DIRTY 2
26.38 +
26.39 +#define ICACHE_ENTRY_COUNT 256
26.40 +#define OCACHE_ENTRY_COUNT 512
26.41 +
26.42 +struct cache_line {
26.43 + uint32_t key; // Fast address match - bits 5..28 for valid entry, -1 for invalid entry
26.44 + uint32_t tag; // tag + flags value from the address field
26.45 +};
26.46 +
26.47 +
26.48 +static struct cache_line ccn_icache[ICACHE_ENTRY_COUNT];
26.49 +static struct cache_line ccn_ocache[OCACHE_ENTRY_COUNT];
26.50 +static unsigned char ccn_icache_data[ICACHE_ENTRY_COUNT*32];
26.51 +static unsigned char ccn_ocache_data[OCACHE_ENTRY_COUNT*32];
26.52 +
26.53 +
26.54 +/*********************** General module requirements ********************/
26.55 +
26.56 +void CCN_save_state( FILE *f )
26.57 +{
26.58 + fwrite( &ccn_icache, sizeof(ccn_icache), 1, f );
26.59 + fwrite( &ccn_icache_data, sizeof(ccn_icache_data), 1, f );
26.60 + fwrite( &ccn_ocache, sizeof(ccn_ocache), 1, f);
26.61 + fwrite( &ccn_ocache_data, sizeof(ccn_ocache_data), 1, f);
26.62 +}
26.63 +
26.64 +int CCN_load_state( FILE *f )
26.65 +{
26.66 + /* Setup the cache mode according to the saved register value
26.67 + * (mem_load runs before this point to load all MMIO data)
26.68 + */
26.69 + mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
26.70 +
26.71 + if( fread( &ccn_icache, sizeof(ccn_icache), 1, f ) != 1 ) {
26.72 + return 1;
26.73 + }
26.74 + if( fread( &ccn_icache_data, sizeof(ccn_icache_data), 1, f ) != 1 ) {
26.75 + return 1;
26.76 + }
26.77 + if( fread( &ccn_ocache, sizeof(ccn_ocache), 1, f ) != 1 ) {
26.78 + return 1;
26.79 + }
26.80 + if( fread( &ccn_ocache_data, sizeof(ccn_ocache_data), 1, f ) != 1 ) {
26.81 + return 1;
26.82 + }
26.83 + return 0;
26.84 +}
26.85 +
26.86 +/************************* OCRAM memory address space ************************/
26.87 +
26.88 +#define OCRAMPAGE0 (&ccn_ocache_data[4096]) /* Lines 128-255 */
26.89 +#define OCRAMPAGE1 (&ccn_ocache_data[12288]) /* Lines 384-511 */
26.90 +
26.91 +static int32_t FASTCALL ocram_page0_read_long( sh4addr_t addr )
26.92 +{
26.93 + return *((int32_t *)(OCRAMPAGE0 + (addr&0x00000FFF)));
26.94 +}
26.95 +static int32_t FASTCALL ocram_page0_read_word( sh4addr_t addr )
26.96 +{
26.97 + return SIGNEXT16(*((int16_t *)(OCRAMPAGE0 + (addr&0x00000FFF))));
26.98 +}
26.99 +static int32_t FASTCALL ocram_page0_read_byte( sh4addr_t addr )
26.100 +{
26.101 + return SIGNEXT8(*((int16_t *)(OCRAMPAGE0 + (addr&0x00000FFF))));
26.102 +}
26.103 +static void FASTCALL ocram_page0_write_long( sh4addr_t addr, uint32_t val )
26.104 +{
26.105 + *(uint32_t *)(OCRAMPAGE0 + (addr&0x00000FFF)) = val;
26.106 +}
26.107 +static void FASTCALL ocram_page0_write_word( sh4addr_t addr, uint32_t val )
26.108 +{
26.109 + *(uint16_t *)(OCRAMPAGE0 + (addr&0x00000FFF)) = (uint16_t)val;
26.110 +}
26.111 +static void FASTCALL ocram_page0_write_byte( sh4addr_t addr, uint32_t val )
26.112 +{
26.113 + *(uint8_t *)(OCRAMPAGE0 + (addr&0x00000FFF)) = (uint8_t)val;
26.114 +}
26.115 +static void FASTCALL ocram_page0_read_burst( unsigned char *dest, sh4addr_t addr )
26.116 +{
26.117 + memcpy( dest, OCRAMPAGE0+(addr&0x00000FFF), 32 );
26.118 +}
26.119 +static void FASTCALL ocram_page0_write_burst( sh4addr_t addr, unsigned char *src )
26.120 +{
26.121 + memcpy( OCRAMPAGE0+(addr&0x00000FFF), src, 32 );
26.122 +}
26.123 +
26.124 +struct mem_region_fn mem_region_ocram_page0 = {
26.125 + ocram_page0_read_long, ocram_page0_write_long,
26.126 + ocram_page0_read_word, ocram_page0_write_word,
26.127 + ocram_page0_read_byte, ocram_page0_write_byte,
26.128 + ocram_page0_read_burst, ocram_page0_write_burst,
26.129 + unmapped_prefetch };
26.130 +
26.131 +static int32_t FASTCALL ocram_page1_read_long( sh4addr_t addr )
26.132 +{
26.133 + return *((int32_t *)(OCRAMPAGE1 + (addr&0x00000FFF)));
26.134 +}
26.135 +static int32_t FASTCALL ocram_page1_read_word( sh4addr_t addr )
26.136 +{
26.137 + return SIGNEXT16(*((int16_t *)(OCRAMPAGE1 + (addr&0x00000FFF))));
26.138 +}
26.139 +static int32_t FASTCALL ocram_page1_read_byte( sh4addr_t addr )
26.140 +{
26.141 + return SIGNEXT8(*((int16_t *)(OCRAMPAGE1 + (addr&0x00000FFF))));
26.142 +}
26.143 +static void FASTCALL ocram_page1_write_long( sh4addr_t addr, uint32_t val )
26.144 +{
26.145 + *(uint32_t *)(OCRAMPAGE1 + (addr&0x00000FFF)) = val;
26.146 +}
26.147 +static void FASTCALL ocram_page1_write_word( sh4addr_t addr, uint32_t val )
26.148 +{
26.149 + *(uint16_t *)(OCRAMPAGE1 + (addr&0x00000FFF)) = (uint16_t)val;
26.150 +}
26.151 +static void FASTCALL ocram_page1_write_byte( sh4addr_t addr, uint32_t val )
26.152 +{
26.153 + *(uint8_t *)(OCRAMPAGE1 + (addr&0x00000FFF)) = (uint8_t)val;
26.154 +}
26.155 +static void FASTCALL ocram_page1_read_burst( unsigned char *dest, sh4addr_t addr )
26.156 +{
26.157 + memcpy( dest, OCRAMPAGE1+(addr&0x00000FFF), 32 );
26.158 +}
26.159 +static void FASTCALL ocram_page1_write_burst( sh4addr_t addr, unsigned char *src )
26.160 +{
26.161 + memcpy( OCRAMPAGE1+(addr&0x00000FFF), src, 32 );
26.162 +}
26.163 +
26.164 +struct mem_region_fn mem_region_ocram_page1 = {
26.165 + ocram_page1_read_long, ocram_page1_write_long,
26.166 + ocram_page1_read_word, ocram_page1_write_word,
26.167 + ocram_page1_read_byte, ocram_page1_write_byte,
26.168 + ocram_page1_read_burst, ocram_page1_write_burst,
26.169 + unmapped_prefetch };
26.170 +
26.171 +/************************** Cache direct access ******************************/
26.172 +
26.173 +static int32_t ccn_icache_addr_read( sh4addr_t addr )
26.174 +{
26.175 + int entry = (addr & 0x00001FE0);
26.176 + return ccn_icache[entry>>5].tag;
26.177 +}
26.178 +
26.179 +static void ccn_icache_addr_write( sh4addr_t addr, uint32_t val )
26.180 +{
26.181 + int entry = (addr & 0x00003FE0);
26.182 + struct cache_line *line = &ccn_ocache[entry>>5];
26.183 + if( addr & 0x08 ) { // Associative
26.184 + /* FIXME: implement this - requires ITLB lookups, with exception in case of multi-hit */
26.185 + } else {
26.186 + line->tag = val & 0x1FFFFC01;
26.187 + line->key = (val & 0x1FFFFC00)|(entry & 0x000003E0);
26.188 + }
26.189 +}
26.190 +
26.191 +struct mem_region_fn p4_region_icache_addr = {
26.192 + ccn_icache_addr_read, ccn_icache_addr_write,
26.193 + unmapped_read_long, unmapped_write_long,
26.194 + unmapped_read_long, unmapped_write_long,
26.195 + unmapped_read_burst, unmapped_write_burst,
26.196 + unmapped_prefetch };
26.197 +
26.198 +
26.199 +static int32_t ccn_icache_data_read( sh4addr_t addr )
26.200 +{
26.201 + int entry = (addr & 0x00001FFC);
26.202 + return *(uint32_t *)&ccn_icache_data[entry];
26.203 +}
26.204 +
26.205 +static void ccn_icache_data_write( sh4addr_t addr, uint32_t val )
26.206 +{
26.207 + int entry = (addr & 0x00001FFC);
26.208 + *(uint32_t *)&ccn_icache_data[entry] = val;
26.209 +}
26.210 +
26.211 +struct mem_region_fn p4_region_icache_data = {
26.212 + ccn_icache_data_read, ccn_icache_data_write,
26.213 + unmapped_read_long, unmapped_write_long,
26.214 + unmapped_read_long, unmapped_write_long,
26.215 + unmapped_read_burst, unmapped_write_burst,
26.216 + unmapped_prefetch };
26.217 +
26.218 +
26.219 +static int32_t ccn_ocache_addr_read( sh4addr_t addr )
26.220 +{
26.221 + int entry = (addr & 0x00003FE0);
26.222 + return ccn_ocache[entry>>5].tag;
26.223 +}
26.224 +
26.225 +static void ccn_ocache_addr_write( sh4addr_t addr, uint32_t val )
26.226 +{
26.227 + int entry = (addr & 0x00003FE0);
26.228 + struct cache_line *line = &ccn_ocache[entry>>5];
26.229 + if( addr & 0x08 ) { // Associative
26.230 + } else {
26.231 + if( (line->tag & (CACHE_VALID|CACHE_DIRTY)) == (CACHE_VALID|CACHE_DIRTY) ) {
26.232 + char *cache_data = &ccn_ocache_data[entry&0x00003FE0];
26.233 + // Cache line is dirty - writeback.
26.234 + ext_address_space[line->tag>>12]->write_burst(line->key, cache_data);
26.235 + }
26.236 + line->tag = val & 0x1FFFFC03;
26.237 + line->key = (val & 0x1FFFFC00)|(entry & 0x000003E0);
26.238 + }
26.239 +}
26.240 +
26.241 +struct mem_region_fn p4_region_ocache_addr = {
26.242 + ccn_ocache_addr_read, ccn_ocache_addr_write,
26.243 + unmapped_read_long, unmapped_write_long,
26.244 + unmapped_read_long, unmapped_write_long,
26.245 + unmapped_read_burst, unmapped_write_burst,
26.246 + unmapped_prefetch };
26.247 +
26.248 +
26.249 +static int32_t ccn_ocache_data_read( sh4addr_t addr )
26.250 +{
26.251 + int entry = (addr & 0x00003FFC);
26.252 + return *(uint32_t *)&ccn_ocache_data[entry];
26.253 +}
26.254 +
26.255 +static void ccn_ocache_data_write( sh4addr_t addr, uint32_t val )
26.256 +{
26.257 + int entry = (addr & 0x00003FFC);
26.258 + *(uint32_t *)&ccn_ocache_data[entry] = val;
26.259 +}
26.260 +
26.261 +struct mem_region_fn p4_region_ocache_data = {
26.262 + ccn_ocache_data_read, ccn_ocache_data_write,
26.263 + unmapped_read_long, unmapped_write_long,
26.264 + unmapped_read_long, unmapped_write_long,
26.265 + unmapped_read_burst, unmapped_write_burst,
26.266 + unmapped_prefetch };
26.267 +
26.268 +
26.269 +/****************** Cache control *********************/
26.270 +
26.271 +void CCN_set_cache_control( int reg )
26.272 +{
26.273 + uint32_t i;
26.274 +
26.275 + if( reg & CCR_ICI ) { /* icache invalidate */
26.276 + for( i=0; i<ICACHE_ENTRY_COUNT; i++ ) {
26.277 + ccn_icache[i].tag &= ~CACHE_VALID;
26.278 + }
26.279 + }
26.280 +
26.281 + if( reg & CCR_OCI ) { /* ocache invalidate */
26.282 + for( i=0; i<OCACHE_ENTRY_COUNT; i++ ) {
26.283 + ccn_ocache[i].tag &= ~(CACHE_VALID|CACHE_DIRTY);
26.284 + }
26.285 + }
26.286 +
26.287 + switch( reg & (CCR_OIX|CCR_ORA|CCR_OCE) ) {
26.288 + case MEM_OC_INDEX0: /* OIX=0 */
26.289 + for( i=OCRAM_START; i<OCRAM_END; i+=4 ) {
26.290 + sh4_address_space[i] = &mem_region_ocram_page0;
26.291 + sh4_address_space[i+1] = &mem_region_ocram_page0;
26.292 + sh4_address_space[i+2] = &mem_region_ocram_page1;
26.293 + sh4_address_space[i+3] = &mem_region_ocram_page1;
26.294 + }
26.295 + break;
26.296 + case MEM_OC_INDEX1: /* OIX=1 */
26.297 + for( i=OCRAM_START; i<OCRAM_MID; i++ )
26.298 + sh4_address_space[i] = &mem_region_ocram_page0;
26.299 + for( i=OCRAM_MID; i<OCRAM_END; i++ )
26.300 + sh4_address_space[i] = &mem_region_ocram_page1;
26.301 + break;
26.302 + default: /* disabled */
26.303 + for( i=OCRAM_START; i<OCRAM_END; i++ )
26.304 + sh4_address_space[i] = &mem_region_unmapped;
26.305 + break;
26.306 + }
26.307 +}
26.308 +
26.309 +/**
26.310 + * Prefetch for non-storequeue regions
26.311 + */
26.312 +void FASTCALL ccn_prefetch( sh4addr_t addr )
26.313 +{
26.314 +
26.315 +}
26.316 +
26.317 +/**
26.318 + * Prefetch for non-cached regions. Oddly enough, this does nothing whatsoever.
26.319 + */
26.320 +void FASTCALL ccn_uncached_prefetch( sh4addr_t addr )
26.321 +{
26.322 +
26.323 +}
26.324 +/********************************* Store-queue *******************************/
26.325 +/*
26.326 + * The storequeue is strictly speaking part of the cache, but most of
26.327 + * the complexity is actually around its addressing (ie in the MMU). The
26.328 + * methods here can assume we've already passed SQMD protection and the TLB
26.329 + * lookups (where appropriate).
26.330 + */
26.331 +void FASTCALL ccn_storequeue_write_long( sh4addr_t addr, uint32_t val )
26.332 +{
26.333 + sh4r.store_queue[(addr>>2)&0xF] = val;
26.334 +}
26.335 +int32_t FASTCALL ccn_storequeue_read_long( sh4addr_t addr )
26.336 +{
26.337 + return sh4r.store_queue[(addr>>2)&0xF];
26.338 +}
26.339 +
26.340 +/**
26.341 + * Variant used when tlb is disabled - address will be the original prefetch
26.342 + * address (ie 0xE0001234). Due to the way the SQ addressing is done, it can't
26.343 + * be hardcoded on 4K page boundaries, so we manually decode it here.
26.344 + */
26.345 +void FASTCALL ccn_storequeue_prefetch( sh4addr_t addr )
26.346 +{
26.347 + int queue = (addr&0x20)>>2;
26.348 + sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
26.349 + uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
26.350 + sh4addr_t target = (addr&0x03FFFFE0) | hi;
26.351 + ext_address_space[target>>12]->write_burst( target, src );
26.352 +}
26.353 +
26.354 +/**
26.355 + * Variant used when tlb is enabled - address in this case is already
26.356 + * mapped to the external target address.
26.357 + */
26.358 +void FASTCALL ccn_storequeue_prefetch_tlb( sh4addr_t addr )
26.359 +{
26.360 + int queue = (addr&0x20)>>2;
26.361 + sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
26.362 + ext_address_space[addr>>12]->write_burst( (addr & 0x1FFFFFE0), src );
26.363 +}
27.1 --- a/src/sh4/dmac.c Mon Dec 15 10:44:56 2008 +0000
27.2 +++ b/src/sh4/dmac.c Tue Jan 13 11:56:28 2009 +0000
27.3 @@ -82,13 +82,14 @@
27.4 */
27.5 }
27.6
27.7 -int32_t mmio_region_DMAC_read( uint32_t reg )
27.8 +MMIO_REGION_READ_FN( DMAC, reg )
27.9 {
27.10 - return MMIO_READ( DMAC, reg );
27.11 + return MMIO_READ( DMAC, reg&0xFFF );
27.12 }
27.13
27.14 -void mmio_region_DMAC_write( uint32_t reg, uint32_t val )
27.15 +MMIO_REGION_WRITE_FN( DMAC, reg, val )
27.16 {
27.17 + reg &= 0xFFF;
27.18 switch( reg ) {
27.19 case DMAOR:
27.20 MMIO_WRITE( DMAC, reg, val );
28.1 --- a/src/sh4/ia32abi.h Mon Dec 15 10:44:56 2008 +0000
28.2 +++ b/src/sh4/ia32abi.h Tue Jan 13 11:56:28 2009 +0000
28.3 @@ -24,13 +24,22 @@
28.4
28.5 #define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );
28.6
28.7 +static inline decode_address( int addr_reg )
28.8 +{
28.9 + uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
28.10 + MOV_r32_r32( addr_reg, R_ECX );
28.11 + SHR_imm8_r32( 12, R_ECX );
28.12 + MOV_r32disp32x4_r32( R_ECX, base, R_ECX );
28.13 +}
28.14 +
28.15 /**
28.16 * Note: clobbers EAX to make the indirect call - this isn't usually
28.17 * a problem since the callee will usually clobber it anyway.
28.18 */
28.19 static inline void call_func0( void *ptr )
28.20 {
28.21 - CALL_ptr(ptr);
28.22 + load_imm32(R_ECX, (uint32_t)ptr);
28.23 + CALL_r32(R_ECX);
28.24 }
28.25
28.26 #ifdef HAVE_FASTCALL
28.27 @@ -39,7 +48,33 @@
28.28 if( arg1 != R_EAX ) {
28.29 MOV_r32_r32( arg1, R_EAX );
28.30 }
28.31 - CALL_ptr(ptr);
28.32 + load_imm32(R_ECX, (uint32_t)ptr);
28.33 + CALL_r32(R_ECX);
28.34 +}
28.35 +
28.36 +static inline void call_func1_r32( int addr_reg, int arg1 )
28.37 +{
28.38 + if( arg1 != R_EAX ) {
28.39 + MOV_r32_r32( arg1, R_EAX );
28.40 + }
28.41 + CALL_r32(addr_reg);
28.42 +}
28.43 +
28.44 +static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
28.45 +{
28.46 + if( arg1 != R_EAX ) {
28.47 + MOV_r32_r32( arg1, R_EAX );
28.48 + }
28.49 + CALL_r32disp8(preg, disp8);
28.50 +}
28.51 +
28.52 +static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
28.53 +{
28.54 + if( arg1 != R_EAX ) {
28.55 + MOV_r32_r32( arg1, R_EAX );
28.56 + }
28.57 + load_exc_backpatch(R_EDX);
28.58 + CALL_r32disp8(preg, disp8);
28.59 }
28.60
28.61 static inline void call_func2( void *ptr, int arg1, int arg2 )
28.62 @@ -50,16 +85,54 @@
28.63 if( arg1 != R_EAX ) {
28.64 MOV_r32_r32( arg1, R_EAX );
28.65 }
28.66 - CALL_ptr(ptr);
28.67 + load_imm32(R_ECX, (uint32_t)ptr);
28.68 + CALL_r32(R_ECX);
28.69 }
28.70
28.71 +static inline void call_func2_r32( int addr_reg, int arg1, int arg2 )
28.72 +{
28.73 + if( arg2 != R_EDX ) {
28.74 + MOV_r32_r32( arg2, R_EDX );
28.75 + }
28.76 + if( arg1 != R_EAX ) {
28.77 + MOV_r32_r32( arg1, R_EAX );
28.78 + }
28.79 + CALL_r32(addr_reg);
28.80 +}
28.81 +
28.82 +static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
28.83 +{
28.84 + if( arg2 != R_EDX ) {
28.85 + MOV_r32_r32( arg2, R_EDX );
28.86 + }
28.87 + if( arg1 != R_EAX ) {
28.88 + MOV_r32_r32( arg1, R_EAX );
28.89 + }
28.90 + CALL_r32disp8(preg, disp8);
28.91 +}
28.92 +
28.93 +static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
28.94 +{
28.95 + if( arg2 != R_EDX ) {
28.96 + MOV_r32_r32( arg2, R_EDX );
28.97 + }
28.98 + if( arg1 != R_EAX ) {
28.99 + MOV_r32_r32( arg1, R_EAX );
28.100 + }
28.101 + MOV_backpatch_esp8( 0 );
28.102 + CALL_r32disp8(preg, disp8);
28.103 +}
28.104 +
28.105 +
28.106 +
28.107 static inline void call_func1_exc( void *ptr, int arg1, int pc )
28.108 {
28.109 if( arg1 != R_EAX ) {
28.110 MOV_r32_r32( arg1, R_EAX );
28.111 }
28.112 load_exc_backpatch(R_EDX);
28.113 - CALL_ptr(ptr);
28.114 + load_imm32(R_ECX, (uint32_t)ptr);
28.115 + CALL_r32(R_ECX);
28.116 }
28.117
28.118 static inline void call_func2_exc( void *ptr, int arg1, int arg2, int pc )
28.119 @@ -70,48 +143,18 @@
28.120 if( arg1 != R_EAX ) {
28.121 MOV_r32_r32( arg1, R_EAX );
28.122 }
28.123 - load_exc_backpatch(R_ECX);
28.124 - CALL_ptr(ptr);
28.125 + MOV_backpatch_esp8(0);
28.126 + load_imm32(R_ECX, (uint32_t)ptr);
28.127 + CALL_r32(R_ECX);
28.128 }
28.129
28.130 -/**
28.131 - * Write a double (64-bit) value into memory, with the first word in arg2a, and
28.132 - * the second in arg2b
28.133 - */
28.134 -static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
28.135 -{
28.136 - MOV_r32_esp8(addr, 0);
28.137 - MOV_r32_esp8(arg2b, 4);
28.138 - call_func2(sh4_write_long, addr, arg2a);
28.139 - MOV_esp8_r32(0, R_EAX);
28.140 - MOV_esp8_r32(4, R_EDX);
28.141 - ADD_imm8s_r32(4, R_EAX);
28.142 - call_func0(sh4_write_long);
28.143 -}
28.144 -
28.145 -/**
28.146 - * Read a double (64-bit) value from memory, writing the first word into arg2a
28.147 - * and the second into arg2b. The addr must not be in EAX
28.148 - */
28.149 -static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
28.150 -{
28.151 - MOV_r32_esp8(addr, 0);
28.152 - call_func1(sh4_read_long, addr);
28.153 - MOV_r32_esp8(R_EAX, 4);
28.154 - MOV_esp8_r32(0, R_EAX);
28.155 - ADD_imm8s_r32(4, R_EAX);
28.156 - call_func0(sh4_read_long);
28.157 - if( arg2b != R_EAX ) {
28.158 - MOV_r32_r32(R_EAX, arg2b);
28.159 - }
28.160 - MOV_esp8_r32(4, arg2a);
28.161 -}
28.162 #else
28.163 static inline void call_func1( void *ptr, int arg1 )
28.164 {
28.165 SUB_imm8s_r32( 12, R_ESP );
28.166 PUSH_r32(arg1);
28.167 - CALL_ptr(ptr);
28.168 + load_imm32(R_ECX, (uint32_t)ptr);
28.169 + CALL_r32(R_ECX);
28.170 ADD_imm8s_r32( 16, R_ESP );
28.171 }
28.172
28.173 @@ -120,45 +163,8 @@
28.174 SUB_imm8s_r32( 8, R_ESP );
28.175 PUSH_r32(arg2);
28.176 PUSH_r32(arg1);
28.177 - CALL_ptr(ptr);
28.178 - ADD_imm8s_r32( 16, R_ESP );
28.179 -}
28.180 -
28.181 -/**
28.182 - * Write a double (64-bit) value into memory, with the first word in arg2a, and
28.183 - * the second in arg2b
28.184 - */
28.185 -static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
28.186 -{
28.187 - SUB_imm8s_r32( 8, R_ESP );
28.188 - PUSH_r32(arg2b);
28.189 - LEA_r32disp8_r32( addr, 4, arg2b );
28.190 - PUSH_r32(arg2b);
28.191 - SUB_imm8s_r32( 8, R_ESP );
28.192 - PUSH_r32(arg2a);
28.193 - PUSH_r32(addr);
28.194 - CALL_ptr(sh4_write_long);
28.195 - ADD_imm8s_r32( 16, R_ESP );
28.196 - CALL_ptr(sh4_write_long);
28.197 - ADD_imm8s_r32( 16, R_ESP );
28.198 -}
28.199 -
28.200 -/**
28.201 - * Read a double (64-bit) value from memory, writing the first word into arg2a
28.202 - * and the second into arg2b. The addr must not be in EAX
28.203 - */
28.204 -static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
28.205 -{
28.206 - SUB_imm8s_r32( 12, R_ESP );
28.207 - PUSH_r32(addr);
28.208 - CALL_ptr(sh4_read_long);
28.209 - MOV_r32_esp8(R_EAX, 4);
28.210 - ADD_imm8s_esp8(4, 0);
28.211 - CALL_ptr(sh4_read_long);
28.212 - if( arg2b != R_EAX ) {
28.213 - MOV_r32_r32( R_EAX, arg2b );
28.214 - }
28.215 - MOV_esp8_r32( 4, arg2a );
28.216 + load_imm32(R_ECX, (uint32_t)ptr);
28.217 + CALL_r32(R_ECX);
28.218 ADD_imm8s_r32( 16, R_ESP );
28.219 }
28.220
29.1 --- a/src/sh4/ia64abi.h Mon Dec 15 10:44:56 2008 +0000
29.2 +++ b/src/sh4/ia64abi.h Tue Jan 13 11:56:28 2009 +0000
29.3 @@ -24,6 +24,15 @@
29.4
29.5 #define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
29.6
29.7 +static inline decode_address( int addr_reg )
29.8 +{
29.9 + uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
29.10 + MOV_r32_r32( addr_reg, R_ECX );
29.11 + SHR_imm8_r32( 12, R_ECX );
29.12 + load_ptr( R_EDI, base );
29.13 + REXW(); OP(0x8B); OP(0x0C); OP(0xCF); // mov.q [%rdi + %rcx*8], %rcx
29.14 +}
29.15 +
29.16 /**
29.17 * Note: clobbers EAX to make the indirect call - this isn't usually
29.18 * a problem since the callee will usually clobber it anyway.
29.19 @@ -50,6 +59,19 @@
29.20 call_func0(ptr);
29.21 }
29.22
29.23 +static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
29.24 +{
29.25 + REXW(); MOV_r32_r32(arg1, R_EDI);
29.26 + CALL_r32disp8(preg, disp8);
29.27 +}
29.28 +
29.29 +static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
29.30 +{
29.31 + REXW(); MOV_r32_r32(arg1, R_EDI);
29.32 + load_exc_backpatch(R_ESI);
29.33 + CALL_r32disp8(preg, disp8);
29.34 +}
29.35 +
29.36 #define CALL_FUNC2_SIZE 16
29.37 static inline void call_func2( void *ptr, int arg1, int arg2 )
29.38 {
29.39 @@ -58,42 +80,23 @@
29.40 call_func0(ptr);
29.41 }
29.42
29.43 -#define MEM_WRITE_DOUBLE_SIZE 35
29.44 -/**
29.45 - * Write a double (64-bit) value into memory, with the first word in arg2a, and
29.46 - * the second in arg2b
29.47 - */
29.48 -static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
29.49 +static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
29.50 {
29.51 - PUSH_r32(arg2b);
29.52 - PUSH_r32(addr);
29.53 - call_func2(sh4_write_long, addr, arg2a);
29.54 - POP_r32(R_EDI);
29.55 - POP_r32(R_ESI);
29.56 - ADD_imm8s_r32(4, R_EDI);
29.57 - call_func0(sh4_write_long);
29.58 + REXW(); MOV_r32_r32(arg1, R_EDI);
29.59 + REXW(); MOV_r32_r32(arg2, R_ESI);
29.60 + CALL_r32disp8(preg, disp8);
29.61 }
29.62
29.63 -#define MEM_READ_DOUBLE_SIZE 43
29.64 -/**
29.65 - * Read a double (64-bit) value from memory, writing the first word into arg2a
29.66 - * and the second into arg2b. The addr must not be in EAX
29.67 - */
29.68 -static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
29.69 +static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
29.70 {
29.71 - REXW(); SUB_imm8s_r32( 8, R_ESP );
29.72 - PUSH_r32(addr);
29.73 - call_func1(sh4_read_long, addr);
29.74 - POP_r32(R_EDI);
29.75 - PUSH_r32(R_EAX);
29.76 - ADD_imm8s_r32(4, R_EDI);
29.77 - call_func0(sh4_read_long);
29.78 - MOV_r32_r32(R_EAX, arg2b);
29.79 - POP_r32(arg2a);
29.80 - REXW(); ADD_imm8s_r32( 8, R_ESP );
29.81 + REXW(); MOV_r32_r32(arg1, R_EDI);
29.82 + REXW(); MOV_r32_r32(arg2, R_ESI);
29.83 + load_exc_backpatch(R_EDX);
29.84 + CALL_r32disp8(preg, disp8);
29.85 }
29.86
29.87
29.88 +
29.89 /**
29.90 * Emit the 'start of block' assembly. Sets up the stack frame and save
29.91 * SI/DI as required
30.1 --- a/src/sh4/intc.c Mon Dec 15 10:44:56 2008 +0000
30.2 +++ b/src/sh4/intc.c Tue Jan 13 11:56:28 2009 +0000
30.3 @@ -53,8 +53,9 @@
30.4 int priority[INT_NUM_SOURCES];
30.5 } intc_state;
30.6
30.7 -void mmio_region_INTC_write( uint32_t reg, uint32_t val )
30.8 +MMIO_REGION_WRITE_FN( INTC, reg, val )
30.9 {
30.10 + reg &= 0xFFF;
30.11 /* Well it saves having to use an intermediate table... */
30.12 switch( reg ) {
30.13 case ICR: /* care about this later */
30.14 @@ -95,9 +96,9 @@
30.15 MMIO_WRITE( INTC, reg, val );
30.16 }
30.17
30.18 -int32_t mmio_region_INTC_read( uint32_t reg )
30.19 +MMIO_REGION_READ_FN( INTC, reg )
30.20 {
30.21 - return MMIO_READ( INTC, reg );
30.22 + return MMIO_READ( INTC, reg & 0xFFF );
30.23 }
30.24
30.25 void INTC_reset()
31.1 --- a/src/sh4/mmu.c Mon Dec 15 10:44:56 2008 +0000
31.2 +++ b/src/sh4/mmu.c Tue Jan 13 11:56:28 2009 +0000
31.3 @@ -1,7 +1,8 @@
31.4 /**
31.5 * $Id$
31.6 *
31.7 - * MMU implementation
31.8 + * SH4 MMU implementation based on address space page maps. This module
31.9 + * is responsible for all address decoding functions.
31.10 *
31.11 * Copyright (c) 2005 Nathan Keynes.
31.12 *
31.13 @@ -22,224 +23,142 @@
31.14 #include "sh4/sh4mmio.h"
31.15 #include "sh4/sh4core.h"
31.16 #include "sh4/sh4trans.h"
31.17 +#include "dreamcast.h"
31.18 #include "mem.h"
31.19 +#include "mmu.h"
31.20
31.21 -#ifdef HAVE_FRAME_ADDRESS
31.22 -#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
31.23 -#else
31.24 -#define RETURN_VIA(exc) return MMU_VMA_ERROR
31.25 -#endif
31.26 -
31.27 -#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
31.28 -
31.29 -/* The MMU (practically unique in the system) is allowed to raise exceptions
31.30 - * directly, with a return code indicating that one was raised and the caller
31.31 - * had better behave appropriately.
31.32 - */
31.33 -#define RAISE_TLB_ERROR(code, vpn) \
31.34 - MMIO_WRITE(MMU, TEA, vpn); \
31.35 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
31.36 - sh4_raise_tlb_exception(code);
31.37 -
31.38 +#define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
31.39 #define RAISE_MEM_ERROR(code, vpn) \
31.40 MMIO_WRITE(MMU, TEA, vpn); \
31.41 MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
31.42 sh4_raise_exception(code);
31.43 +#define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)
31.44
31.45 -#define RAISE_OTHER_ERROR(code) \
31.46 - sh4_raise_exception(code);
31.47 -/**
31.48 - * Abort with a non-MMU address error. Caused by user-mode code attempting
31.49 - * to access privileged regions, or alignment faults.
31.50 - */
31.51 -#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
31.52 -#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
31.53 +/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
31.54 +#define IS_1K_PAGE_ENTRY(ent) ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
31.55
31.56 -#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
31.57 -#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
31.58 -#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
31.59 -#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
31.60 -#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
31.61 -#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
31.62 - MMIO_WRITE(MMU, TEA, vpn); \
31.63 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
31.64 +/* Primary address space (used directly by SH4 cores) */
31.65 +mem_region_fn_t *sh4_address_space;
31.66 +mem_region_fn_t *sh4_user_address_space;
31.67
31.68 +/* Accessed from the UTLB accessor methods */
31.69 +uint32_t mmu_urc;
31.70 +uint32_t mmu_urb;
31.71 +static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */
31.72
31.73 -#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
31.74 -#define OCRAM_END (0x20000000>>LXDREAM_PAGE_BITS)
31.75 -
31.76 -#define ITLB_ENTRY_COUNT 4
31.77 -#define UTLB_ENTRY_COUNT 64
31.78 -
31.79 -/* Entry address */
31.80 -#define TLB_VALID 0x00000100
31.81 -#define TLB_USERMODE 0x00000040
31.82 -#define TLB_WRITABLE 0x00000020
31.83 -#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
31.84 -#define TLB_SIZE_MASK 0x00000090
31.85 -#define TLB_SIZE_1K 0x00000000
31.86 -#define TLB_SIZE_4K 0x00000010
31.87 -#define TLB_SIZE_64K 0x00000080
31.88 -#define TLB_SIZE_1M 0x00000090
31.89 -#define TLB_CACHEABLE 0x00000008
31.90 -#define TLB_DIRTY 0x00000004
31.91 -#define TLB_SHARE 0x00000002
31.92 -#define TLB_WRITETHRU 0x00000001
31.93 -
31.94 -#define MASK_1K 0xFFFFFC00
31.95 -#define MASK_4K 0xFFFFF000
31.96 -#define MASK_64K 0xFFFF0000
31.97 -#define MASK_1M 0xFFF00000
31.98 -
31.99 -struct itlb_entry {
31.100 - sh4addr_t vpn; // Virtual Page Number
31.101 - uint32_t asid; // Process ID
31.102 - uint32_t mask;
31.103 - sh4addr_t ppn; // Physical Page Number
31.104 - uint32_t flags;
31.105 -};
31.106 -
31.107 -struct utlb_entry {
31.108 - sh4addr_t vpn; // Virtual Page Number
31.109 - uint32_t mask; // Page size mask
31.110 - uint32_t asid; // Process ID
31.111 - sh4addr_t ppn; // Physical Page Number
31.112 - uint32_t flags;
31.113 - uint32_t pcmcia; // extra pcmcia data - not used
31.114 -};
31.115 -
31.116 -struct utlb_sort_entry {
31.117 - sh4addr_t key; // Masked VPN + ASID
31.118 - uint32_t mask; // Mask + 0x00FF
31.119 - int entryNo;
31.120 -};
31.121 -
31.122 -
31.123 +/* Module globals */
31.124 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
31.125 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
31.126 -static uint32_t mmu_urc;
31.127 -static uint32_t mmu_urb;
31.128 +static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
31.129 static uint32_t mmu_lrui;
31.130 static uint32_t mmu_asid; // current asid
31.131 +static struct utlb_default_regions *mmu_user_storequeue_regions;
31.132
31.133 -static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
31.134 -static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.
31.135 +/* Structures for 1K page handling */
31.136 +static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
31.137 +static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
31.138 +static int mmu_utlb_1k_free_index;
31.139
31.140 -static sh4ptr_t cache = NULL;
31.141
31.142 +/* Function prototypes */
31.143 static void mmu_invalidate_tlb();
31.144 -static void mmu_utlb_sorted_reset();
31.145 -static void mmu_utlb_sorted_reload();
31.146 +static void mmu_utlb_register_all();
31.147 +static void mmu_utlb_remove_entry(int);
31.148 +static void mmu_utlb_insert_entry(int);
31.149 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
31.150 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
31.151 +static void mmu_set_tlb_enabled( int tlb_on );
31.152 +static void mmu_set_tlb_asid( uint32_t asid );
31.153 +static void mmu_set_storequeue_protected( int protected, int tlb_on );
31.154 +static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
31.155 +static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
31.156 +static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
31.157 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
31.158 +static void mmu_utlb_1k_init();
31.159 +static struct utlb_1k_entry *mmu_utlb_1k_alloc();
31.160 +static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
31.161 +static void mmu_fix_urc();
31.162
31.163 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
31.164 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
31.165 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
31.166 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
31.167 +static uint32_t get_tlb_size_mask( uint32_t flags );
31.168 +static uint32_t get_tlb_size_pages( uint32_t flags );
31.169
31.170 -static uint32_t get_mask_for_flags( uint32_t flags )
31.171 -{
31.172 - switch( flags & TLB_SIZE_MASK ) {
31.173 - case TLB_SIZE_1K: return MASK_1K;
31.174 - case TLB_SIZE_4K: return MASK_4K;
31.175 - case TLB_SIZE_64K: return MASK_64K;
31.176 - case TLB_SIZE_1M: return MASK_1M;
31.177 - default: return 0; /* Unreachable */
31.178 - }
31.179 -}
31.180 +#define DEFAULT_REGIONS 0
31.181 +#define DEFAULT_STOREQUEUE_REGIONS 1
31.182 +#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
31.183
31.184 -int32_t mmio_region_MMU_read( uint32_t reg )
31.185 -{
31.186 - switch( reg ) {
31.187 - case MMUCR:
31.188 - return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
31.189 - default:
31.190 - return MMIO_READ( MMU, reg );
31.191 - }
31.192 -}
31.193 +static struct utlb_default_regions mmu_default_regions[3] = {
31.194 + { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
31.195 + { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
31.196 + { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
31.197
31.198 -void mmio_region_MMU_write( uint32_t reg, uint32_t val )
31.199 -{
31.200 - uint32_t tmp;
31.201 - switch(reg) {
31.202 - case SH4VER:
31.203 - return;
31.204 - case PTEH:
31.205 - val &= 0xFFFFFCFF;
31.206 - if( (val & 0xFF) != mmu_asid ) {
31.207 - mmu_asid = val&0xFF;
31.208 - sh4_icache.page_vma = -1; // invalidate icache as asid has changed
31.209 - }
31.210 - break;
31.211 - case PTEL:
31.212 - val &= 0x1FFFFDFF;
31.213 - break;
31.214 - case PTEA:
31.215 - val &= 0x0000000F;
31.216 - break;
31.217 - case TRA:
31.218 - val &= 0x000003FC;
31.219 - break;
31.220 - case EXPEVT:
31.221 - case INTEVT:
31.222 - val &= 0x00000FFF;
31.223 - break;
31.224 - case MMUCR:
31.225 - if( val & MMUCR_TI ) {
31.226 - mmu_invalidate_tlb();
31.227 - }
31.228 - mmu_urc = (val >> 10) & 0x3F;
31.229 - mmu_urb = (val >> 18) & 0x3F;
31.230 - mmu_lrui = (val >> 26) & 0x3F;
31.231 - val &= 0x00000301;
31.232 - tmp = MMIO_READ( MMU, MMUCR );
31.233 - if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
31.234 - // AT flag has changed state - flush the xlt cache as all bets
31.235 - // are off now. We also need to force an immediate exit from the
31.236 - // current block
31.237 - MMIO_WRITE( MMU, MMUCR, val );
31.238 - sh4_flush_icache();
31.239 - }
31.240 - break;
31.241 - case CCR:
31.242 - mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
31.243 - val &= 0x81A7;
31.244 - break;
31.245 - case MMUUNK1:
31.246 - /* Note that if the high bit is set, this appears to reset the machine.
31.247 - * Not emulating this behaviour yet until we know why...
31.248 - */
31.249 - val &= 0x00010007;
31.250 - break;
31.251 - case QACR0:
31.252 - case QACR1:
31.253 - val &= 0x0000001C;
31.254 - break;
31.255 - case PMCR1:
31.256 - PMM_write_control(0, val);
31.257 - val &= 0x0000C13F;
31.258 - break;
31.259 - case PMCR2:
31.260 - PMM_write_control(1, val);
31.261 - val &= 0x0000C13F;
31.262 - break;
31.263 - default:
31.264 - break;
31.265 - }
31.266 - MMIO_WRITE( MMU, reg, val );
31.267 -}
31.268 +#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
31.269
31.270 +/*********************** Module public functions ****************************/
31.271
31.272 +/**
31.273 + * Allocate memory for the address space maps, and initialize them according
31.274 + * to the default (reset) values. (TLB is disabled by default)
31.275 + */
31.276 +
31.277 void MMU_init()
31.278 {
31.279 - cache = mem_alloc_pages(2);
31.280 + sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
31.281 + sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
31.282 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
31.283 +
31.284 + mmu_set_tlb_enabled(0);
31.285 + mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
31.286 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
31.287 +
31.288 + /* Setup P4 tlb/cache access regions */
31.289 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
31.290 + mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
31.291 + mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
31.292 + mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
31.293 + mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
31.294 + mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
31.295 + mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
31.296 + mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
31.297 + mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
31.298 + mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
31.299 + mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
31.300 +
31.301 + /* Setup P4 control region */
31.302 + mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
31.303 + mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
31.304 + mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
31.305 + mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
31.306 + mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
31.307 + mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
31.308 + mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
31.309 + mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
31.310 + mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
31.311 + mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
31.312 + mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
31.313 + mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
31.314 + mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
31.315 +
31.316 + register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
31.317 + mmu_utlb_1k_init();
31.318 +
31.319 + /* Ensure the code regions are executable */
31.320 + mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
31.321 + mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
31.322 }
31.323
31.324 void MMU_reset()
31.325 {
31.326 mmio_region_MMU_write( CCR, 0 );
31.327 mmio_region_MMU_write( MMUCR, 0 );
31.328 - mmu_utlb_sorted_reload();
31.329 }
31.330
31.331 void MMU_save_state( FILE *f )
31.332 {
31.333 - fwrite( cache, 4096, 2, f );
31.334 + mmu_fix_urc();
31.335 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
31.336 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
31.337 fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
31.338 @@ -250,13 +169,6 @@
31.339
31.340 int MMU_load_state( FILE *f )
31.341 {
31.342 - /* Setup the cache mode according to the saved register value
31.343 - * (mem_load runs before this point to load all MMIO data)
31.344 - */
31.345 - mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
31.346 - if( fread( cache, 4096, 2, f ) != 2 ) {
31.347 - return 1;
31.348 - }
31.349 if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
31.350 return 1;
31.351 }
31.352 @@ -275,151 +187,21 @@
31.353 if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
31.354 return 1;
31.355 }
31.356 - mmu_utlb_sorted_reload();
31.357 +
31.358 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
31.359 + mmu_urc_overflow = mmu_urc >= mmu_urb;
31.360 + mmu_set_tlb_enabled(mmucr&MMUCR_AT);
31.361 + mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
31.362 return 0;
31.363 }
31.364
31.365 -void mmu_set_cache_mode( int mode )
31.366 -{
31.367 - uint32_t i;
31.368 - switch( mode ) {
31.369 - case MEM_OC_INDEX0: /* OIX=0 */
31.370 - for( i=OCRAM_START; i<OCRAM_END; i++ )
31.371 - page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
31.372 - break;
31.373 - case MEM_OC_INDEX1: /* OIX=1 */
31.374 - for( i=OCRAM_START; i<OCRAM_END; i++ )
31.375 - page_map[i] = cache + ((i&0x02000000)>>(25-LXDREAM_PAGE_BITS));
31.376 - break;
31.377 - default: /* disabled */
31.378 - for( i=OCRAM_START; i<OCRAM_END; i++ )
31.379 - page_map[i] = NULL;
31.380 - break;
31.381 - }
31.382 -}
31.383 -
31.384 -/******************* Sorted TLB data structure ****************/
31.385 -/*
31.386 - * mmu_utlb_sorted maintains a list of all active (valid) entries,
31.387 - * sorted by masked VPN and then ASID. Multi-hit entries are resolved
31.388 - * ahead of time, and have -1 recorded as the corresponding PPN.
31.389 - *
31.390 - * FIXME: Multi-hit detection doesn't pick up cases where two pages
31.391 - * overlap due to different sizes (and don't share the same base
31.392 - * address).
31.393 - */
31.394 -static void mmu_utlb_sorted_reset()
31.395 -{
31.396 - mmu_utlb_entries = 0;
31.397 -}
31.398 -
31.399 -/**
31.400 - * Find an entry in the sorted table (VPN+ASID check).
31.401 - */
31.402 -static inline int mmu_utlb_sorted_find( sh4addr_t vma )
31.403 -{
31.404 - int low = 0;
31.405 - int high = mmu_utlb_entries;
31.406 - uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;
31.407 -
31.408 - mmu_urc++;
31.409 - if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
31.410 - mmu_urc = 0;
31.411 - }
31.412 -
31.413 - while( low != high ) {
31.414 - int posn = (high+low)>>1;
31.415 - int masked = lookup & mmu_utlb_sorted[posn].mask;
31.416 - if( mmu_utlb_sorted[posn].key < masked ) {
31.417 - low = posn+1;
31.418 - } else if( mmu_utlb_sorted[posn].key > masked ) {
31.419 - high = posn;
31.420 - } else {
31.421 - return mmu_utlb_sorted[posn].entryNo;
31.422 - }
31.423 - }
31.424 - return -1;
31.425 -
31.426 -}
31.427 -
31.428 -static void mmu_utlb_insert_entry( int entry )
31.429 -{
31.430 - int low = 0;
31.431 - int high = mmu_utlb_entries;
31.432 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
31.433 -
31.434 - assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
31.435 - /* Find the insertion point */
31.436 - while( low != high ) {
31.437 - int posn = (high+low)>>1;
31.438 - if( mmu_utlb_sorted[posn].key < key ) {
31.439 - low = posn+1;
31.440 - } else if( mmu_utlb_sorted[posn].key > key ) {
31.441 - high = posn;
31.442 - } else {
31.443 - /* Exact match - multi-hit */
31.444 - mmu_utlb_sorted[posn].entryNo = -2;
31.445 - return;
31.446 - }
31.447 - } /* 0 2 4 6 */
31.448 - memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
31.449 - (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
31.450 - mmu_utlb_sorted[low].key = key;
31.451 - mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
31.452 - mmu_utlb_sorted[low].entryNo = entry;
31.453 - mmu_utlb_entries++;
31.454 -}
31.455 -
31.456 -static void mmu_utlb_remove_entry( int entry )
31.457 -{
31.458 - int low = 0;
31.459 - int high = mmu_utlb_entries;
31.460 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
31.461 - while( low != high ) {
31.462 - int posn = (high+low)>>1;
31.463 - if( mmu_utlb_sorted[posn].key < key ) {
31.464 - low = posn+1;
31.465 - } else if( mmu_utlb_sorted[posn].key > key ) {
31.466 - high = posn;
31.467 - } else {
31.468 - if( mmu_utlb_sorted[posn].entryNo == -2 ) {
31.469 - /* Multiple-entry recorded - rebuild the whole table minus entry */
31.470 - int i;
31.471 - mmu_utlb_entries = 0;
31.472 - for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
31.473 - if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
31.474 - mmu_utlb_insert_entry(i);
31.475 - }
31.476 - }
31.477 - } else {
31.478 - mmu_utlb_entries--;
31.479 - memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
31.480 - (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
31.481 - }
31.482 - return;
31.483 - }
31.484 - }
31.485 - assert( 0 && "UTLB key not found!" );
31.486 -}
31.487 -
31.488 -static void mmu_utlb_sorted_reload()
31.489 -{
31.490 - int i;
31.491 - mmu_utlb_entries = 0;
31.492 - for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.493 - if( mmu_utlb[i].flags & TLB_VALID )
31.494 - mmu_utlb_insert_entry( i );
31.495 - }
31.496 -}
31.497 -
31.498 -/* TLB maintanence */
31.499 -
31.500 /**
31.501 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
31.502 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
31.503 */
31.504 void MMU_ldtlb()
31.505 {
31.506 + mmu_fix_urc();
31.507 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
31.508 mmu_utlb_remove_entry( mmu_urc );
31.509 mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
31.510 @@ -427,211 +209,632 @@
31.511 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
31.512 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
31.513 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
31.514 - mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
31.515 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
31.516 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
31.517 + mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
31.518 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
31.519 mmu_utlb_insert_entry( mmu_urc );
31.520 }
31.521
31.522 +
31.523 +MMIO_REGION_READ_FN( MMU, reg )
31.524 +{
31.525 + reg &= 0xFFF;
31.526 + switch( reg ) {
31.527 + case MMUCR:
31.528 + mmu_fix_urc();
31.529 + return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
31.530 + default:
31.531 + return MMIO_READ( MMU, reg );
31.532 + }
31.533 +}
31.534 +
31.535 +MMIO_REGION_WRITE_FN( MMU, reg, val )
31.536 +{
31.537 + uint32_t tmp;
31.538 + reg &= 0xFFF;
31.539 + switch(reg) {
31.540 + case SH4VER:
31.541 + return;
31.542 + case PTEH:
31.543 + val &= 0xFFFFFCFF;
31.544 + if( (val & 0xFF) != mmu_asid ) {
31.545 + mmu_set_tlb_asid( val&0xFF );
31.546 + sh4_icache.page_vma = -1; // invalidate icache as asid has changed
31.547 + }
31.548 + break;
31.549 + case PTEL:
31.550 + val &= 0x1FFFFDFF;
31.551 + break;
31.552 + case PTEA:
31.553 + val &= 0x0000000F;
31.554 + break;
31.555 + case TRA:
31.556 + val &= 0x000003FC;
31.557 + break;
31.558 + case EXPEVT:
31.559 + case INTEVT:
31.560 + val &= 0x00000FFF;
31.561 + break;
31.562 + case MMUCR:
31.563 + if( val & MMUCR_TI ) {
31.564 + mmu_invalidate_tlb();
31.565 + }
31.566 + mmu_urc = (val >> 10) & 0x3F;
31.567 + mmu_urb = (val >> 18) & 0x3F;
31.568 + if( mmu_urb == 0 ) {
31.569 + mmu_urb = 0x40;
31.570 + } else if( mmu_urc >= mmu_urb ) {
31.571 + mmu_urc_overflow = TRUE;
31.572 + }
31.573 + mmu_lrui = (val >> 26) & 0x3F;
31.574 + val &= 0x00000301;
31.575 + tmp = MMIO_READ( MMU, MMUCR );
31.576 + if( (val ^ tmp) & (MMUCR_SQMD) ) {
31.577 + mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
31.578 + }
31.579 + if( (val ^ tmp) & (MMUCR_AT) ) {
31.580 + // AT flag has changed state - flush the xlt cache as all bets
31.581 + // are off now. We also need to force an immediate exit from the
31.582 + // current block
31.583 + mmu_set_tlb_enabled( val & MMUCR_AT );
31.584 + MMIO_WRITE( MMU, MMUCR, val );
31.585 + sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
31.586 + xlat_flush_cache(); // If we're not running, flush the cache anyway
31.587 + }
31.588 + break;
31.589 + case CCR:
31.590 + CCN_set_cache_control( val );
31.591 + val &= 0x81A7;
31.592 + break;
31.593 + case MMUUNK1:
31.594 + /* Note that if the high bit is set, this appears to reset the machine.
31.595 + * Not emulating this behaviour yet until we know why...
31.596 + */
31.597 + val &= 0x00010007;
31.598 + break;
31.599 + case QACR0:
31.600 + case QACR1:
31.601 + val &= 0x0000001C;
31.602 + break;
31.603 + case PMCR1:
31.604 + PMM_write_control(0, val);
31.605 + val &= 0x0000C13F;
31.606 + break;
31.607 + case PMCR2:
31.608 + PMM_write_control(1, val);
31.609 + val &= 0x0000C13F;
31.610 + break;
31.611 + default:
31.612 + break;
31.613 + }
31.614 + MMIO_WRITE( MMU, reg, val );
31.615 +}
31.616 +
31.617 +/********************** 1K Page handling ***********************/
31.618 +/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
31.619 + * effort to manage - we justify this on the basis that most programs won't
31.620 + * actually use 1K pages, so we may as well optimize for the common case.
31.621 + *
31.622 + * Implementation uses an intermediate page entry (the utlb_1k_entry) that
31.623 + * redirects requests to the 'real' page entry. These are allocated on an
31.624 + * as-needed basis, and returned to the pool when all subpages are empty.
31.625 + */
31.626 +static void mmu_utlb_1k_init()
31.627 +{
31.628 + int i;
31.629 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.630 + mmu_utlb_1k_free_list[i] = i;
31.631 + mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
31.632 + }
31.633 + mmu_utlb_1k_free_index = 0;
31.634 +}
31.635 +
31.636 +static struct utlb_1k_entry *mmu_utlb_1k_alloc()
31.637 +{
31.638 + assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
31.639 + struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
31.640 + return entry;
31.641 +}
31.642 +
31.643 +static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
31.644 +{
31.645 + unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
31.646 + assert( entryNo < UTLB_ENTRY_COUNT );
31.647 + assert( mmu_utlb_1k_free_index > 0 );
31.648 + mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
31.649 +}
31.650 +
31.651 +
31.652 +/********************** Address space maintenance *************************/
31.653 +
31.654 +/**
31.655 + * MMU accessor functions just increment URC - fixup here if necessary
31.656 + */
31.657 +static inline void mmu_fix_urc()
31.658 +{
31.659 + if( mmu_urc_overflow ) {
31.660 + if( mmu_urc >= 0x40 ) {
31.661 + mmu_urc_overflow = FALSE;
31.662 + mmu_urc -= 0x40;
31.663 + mmu_urc %= mmu_urb;
31.664 + }
31.665 + } else {
31.666 + mmu_urc %= mmu_urb;
31.667 + }
31.668 +}
31.669 +
31.670 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
31.671 +{
31.672 + int count = (end - start) >> 12;
31.673 + mem_region_fn_t *ptr = &sh4_address_space[start>>12];
31.674 + while( count-- > 0 ) {
31.675 + *ptr++ = fn;
31.676 + }
31.677 +}
31.678 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
31.679 +{
31.680 + int count = (end - start) >> 12;
31.681 + mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
31.682 + while( count-- > 0 ) {
31.683 + *ptr++ = fn;
31.684 + }
31.685 +}
31.686 +
31.687 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
31.688 +{
31.689 + int i;
31.690 + if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
31.691 + /* TLB on */
31.692 + sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
31.693 + sh4_address_space[(page|0xA0000000)>>12] = fn;
31.694 + /* Scan UTLB and update any direct-referencing entries */
31.695 + } else {
31.696 + /* Direct map to U0, P0, P1, P2, P3 */
31.697 + for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
31.698 + sh4_address_space[(page|i)>>12] = fn;
31.699 + }
31.700 + for( i=0; i < 0x80000000; i+= 0x20000000 ) {
31.701 + sh4_user_address_space[(page|i)>>12] = fn;
31.702 + }
31.703 + }
31.704 +}
31.705 +
31.706 +static void mmu_set_tlb_enabled( int tlb_on )
31.707 +{
31.708 + mem_region_fn_t *ptr, *uptr;
31.709 + int i;
31.710 +
31.711 + /* Reset the storequeue area */
31.712 +
31.713 + if( tlb_on ) {
31.714 + mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
31.715 + mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
31.716 + mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
31.717 +
31.718 + /* Default SQ prefetch goes to TLB miss (?) */
31.719 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
31.720 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
31.721 + mmu_utlb_register_all();
31.722 + } else {
31.723 + for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
31.724 + memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
31.725 + }
31.726 + for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
31.727 + memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
31.728 + }
31.729 +
31.730 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
31.731 + if( IS_STOREQUEUE_PROTECTED() ) {
31.732 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
31.733 + } else {
31.734 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
31.735 + }
31.736 + }
31.737 +
31.738 +}
31.739 +
31.740 +/**
31.741 + * Flip the SQMD switch - this is rather expensive, so will need to be changed if
31.742 + * anything expects to do this frequently.
31.743 + */
31.744 +static void mmu_set_storequeue_protected( int protected, int tlb_on )
31.745 +{
31.746 + mem_region_fn_t nontlb_region;
31.747 + int i;
31.748 +
31.749 + if( protected ) {
31.750 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
31.751 + nontlb_region = &p4_region_storequeue_sqmd;
31.752 + } else {
31.753 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
31.754 + nontlb_region = &p4_region_storequeue;
31.755 + }
31.756 +
31.757 + if( tlb_on ) {
31.758 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
31.759 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.760 + if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
31.761 + mmu_utlb_insert_entry(i);
31.762 + }
31.763 + }
31.764 + } else {
31.765 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
31.766 + }
31.767 +
31.768 +}
31.769 +
31.770 +static void mmu_set_tlb_asid( uint32_t asid )
31.771 +{
31.772 + /* Scan for pages that need to be remapped */
31.773 + int i;
31.774 + if( IS_SV_ENABLED() ) {
31.775 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.776 + if( mmu_utlb[i].flags & TLB_VALID ) {
31.777 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
31.778 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
31.779 + if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
31.780 + get_tlb_size_pages(mmu_utlb[i].flags) ) )
31.781 + mmu_utlb_remap_pages( FALSE, TRUE, i );
31.782 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
31.783 + mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
31.784 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
31.785 + get_tlb_size_pages(mmu_utlb[i].flags) );
31.786 + }
31.787 + }
31.788 + }
31.789 + }
31.790 + } else {
31.791 + // Remap both Priv+user pages
31.792 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.793 + if( mmu_utlb[i].flags & TLB_VALID ) {
31.794 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
31.795 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
31.796 + if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
31.797 + get_tlb_size_pages(mmu_utlb[i].flags) ) )
31.798 + mmu_utlb_remap_pages( TRUE, TRUE, i );
31.799 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
31.800 + mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
31.801 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
31.802 + get_tlb_size_pages(mmu_utlb[i].flags) );
31.803 + }
31.804 + }
31.805 + }
31.806 + }
31.807 + }
31.808 +
31.809 + mmu_asid = asid;
31.810 +}
31.811 +
31.812 +static uint32_t get_tlb_size_mask( uint32_t flags )
31.813 +{
31.814 + switch( flags & TLB_SIZE_MASK ) {
31.815 + case TLB_SIZE_1K: return MASK_1K;
31.816 + case TLB_SIZE_4K: return MASK_4K;
31.817 + case TLB_SIZE_64K: return MASK_64K;
31.818 + case TLB_SIZE_1M: return MASK_1M;
31.819 + default: return 0; /* Unreachable */
31.820 + }
31.821 +}
31.822 +static uint32_t get_tlb_size_pages( uint32_t flags )
31.823 +{
31.824 + switch( flags & TLB_SIZE_MASK ) {
31.825 + case TLB_SIZE_1K: return 0;
31.826 + case TLB_SIZE_4K: return 1;
31.827 + case TLB_SIZE_64K: return 16;
31.828 + case TLB_SIZE_1M: return 256;
31.829 + default: return 0; /* Unreachable */
31.830 + }
31.831 +}
31.832 +
31.833 +/**
31.834 + * Add a new TLB entry mapping to the address space table. If any of the pages
31.835 + * are already mapped, they are mapped to the TLB multi-hit page instead.
31.836 + * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
31.837 + */
31.838 +static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
31.839 +{
31.840 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
31.841 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
31.842 + struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
31.843 + struct utlb_default_regions *userdefs = privdefs;
31.844 +
31.845 + gboolean mapping_ok = TRUE;
31.846 + int i;
31.847 +
31.848 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
31.849 + /* Storequeue mapping */
31.850 + privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
31.851 + userdefs = mmu_user_storequeue_regions;
31.852 + } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
31.853 + user_page = NULL; /* No user access to P3 region */
31.854 + } else if( start_addr >= 0x80000000 ) {
31.855 + return TRUE; // No mapping - legal but meaningless
31.856 + }
31.857 +
31.858 + if( npages == 0 ) {
31.859 + struct utlb_1k_entry *ent;
31.860 + int i, idx = (start_addr >> 10) & 0x03;
31.861 + if( IS_1K_PAGE_ENTRY(*ptr) ) {
31.862 + ent = (struct utlb_1k_entry *)*ptr;
31.863 + } else {
31.864 + ent = mmu_utlb_1k_alloc();
31.865 + /* New 1K struct - init to previous contents of region */
31.866 + for( i=0; i<4; i++ ) {
31.867 + ent->subpages[i] = *ptr;
31.868 + ent->user_subpages[i] = *uptr;
31.869 + }
31.870 + *ptr = &ent->fn;
31.871 + *uptr = &ent->user_fn;
31.872 + }
31.873 +
31.874 + if( priv_page != NULL ) {
31.875 + if( ent->subpages[idx] == privdefs->tlb_miss ) {
31.876 + ent->subpages[idx] = priv_page;
31.877 + } else {
31.878 + mapping_ok = FALSE;
31.879 + ent->subpages[idx] = privdefs->tlb_multihit;
31.880 + }
31.881 + }
31.882 + if( user_page != NULL ) {
31.883 + if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
31.884 + ent->user_subpages[idx] = user_page;
31.885 + } else {
31.886 + mapping_ok = FALSE;
31.887 + ent->user_subpages[idx] = userdefs->tlb_multihit;
31.888 + }
31.889 + }
31.890 +
31.891 + } else {
31.892 + if( priv_page != NULL ) {
31.893 + /* Privileged mapping only */
31.894 + for( i=0; i<npages; i++ ) {
31.895 + if( *ptr == privdefs->tlb_miss ) {
31.896 + *ptr++ = priv_page;
31.897 + } else {
31.898 + mapping_ok = FALSE;
31.899 + *ptr++ = privdefs->tlb_multihit;
31.900 + }
31.901 + }
31.902 + }
31.903 + if( user_page != NULL ) {
31.904 + /* User mapping only (eg ASID change remap w/ SV=1) */
31.905 + for( i=0; i<npages; i++ ) {
31.906 + if( *uptr == userdefs->tlb_miss ) {
31.907 + *uptr++ = user_page;
31.908 + } else {
31.909 + mapping_ok = FALSE;
31.910 + *uptr++ = userdefs->tlb_multihit;
31.911 + }
31.912 + }
31.913 + }
31.914 + }
31.915 +
31.916 + return mapping_ok;
31.917 +}
31.918 +
31.919 +/**
31.920 + * Remap any pages within the region covered by entryNo, but not including
31.921 + * entryNo itself. This is used to reestablish pages that were previously
31.922 + * covered by a multi-hit exception region when one of the pages is removed.
31.923 + */
31.924 +static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
31.925 +{
31.926 + int mask = mmu_utlb[entryNo].mask;
31.927 + uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
31.928 + int i;
31.929 +
31.930 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.931 + if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
31.932 + /* Overlapping region */
31.933 + mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
31.934 + mem_region_fn_t user_page = (remap_priv ? mmu_utlb_pages[i].user_fn : NULL);
31.935 + uint32_t start_addr;
31.936 + int npages;
31.937 +
31.938 + if( mmu_utlb[i].mask >= mask ) {
31.939 + /* entry is no larger than the area we're replacing - map completely */
31.940 + start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
31.941 + npages = get_tlb_size_pages( mmu_utlb[i].flags );
31.942 + } else {
31.943 + /* Otherwise map subset - region covered by removed page */
31.944 + start_addr = remap_addr;
31.945 + npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
31.946 + }
31.947 +
31.948 + if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
31.949 + mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
31.950 + } else if( IS_SV_ENABLED() ) {
31.951 + mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
31.952 + }
31.953 +
31.954 + }
31.955 + }
31.956 +}
31.957 +
31.958 +/**
31.959 + * Remove a previous TLB mapping (replacing them with the TLB miss region).
31.960 + * @return FALSE if any pages were previously mapped to the TLB multihit page,
31.961 + * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
31.962 + */
31.963 +static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
31.964 +{
31.965 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
31.966 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
31.967 + struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
31.968 + struct utlb_default_regions *userdefs = privdefs;
31.969 +
31.970 + gboolean unmapping_ok = TRUE;
31.971 + int i;
31.972 +
31.973 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
31.974 + /* Storequeue mapping */
31.975 + privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
31.976 + userdefs = mmu_user_storequeue_regions;
31.977 + } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
31.978 + unmap_user = FALSE;
31.979 + } else if( start_addr >= 0x80000000 ) {
31.980 + return TRUE; // No mapping - legal but meaningless
31.981 + }
31.982 +
31.983 + if( npages == 0 ) { // 1K page
31.984 + assert( IS_1K_PAGE_ENTRY( *ptr ) );
31.985 + struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
31.986 + int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
31.987 + if( ent->subpages[idx] == privdefs->tlb_multihit ) {
31.988 + unmapping_ok = FALSE;
31.989 + }
31.990 + if( unmap_priv )
31.991 + ent->subpages[idx] = privdefs->tlb_miss;
31.992 + if( unmap_user )
31.993 + ent->user_subpages[idx] = userdefs->tlb_miss;
31.994 +
31.995 + /* If all 4 subpages have the same content, merge them together and
31.996 + * release the 1K entry
31.997 + */
31.998 + mem_region_fn_t priv_page = ent->subpages[0];
31.999 + mem_region_fn_t user_page = ent->user_subpages[0];
31.1000 + for( i=1; i<4; i++ ) {
31.1001 + if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
31.1002 + mergeable = 0;
31.1003 + break;
31.1004 + }
31.1005 + }
31.1006 + if( mergeable ) {
31.1007 + mmu_utlb_1k_free(ent);
31.1008 + *ptr = priv_page;
31.1009 + *uptr = user_page;
31.1010 + }
31.1011 + } else {
31.1012 + if( unmap_priv ) {
31.1013 + /* Privileged (un)mapping */
31.1014 + for( i=0; i<npages; i++ ) {
31.1015 + if( *ptr == privdefs->tlb_multihit ) {
31.1016 + unmapping_ok = FALSE;
31.1017 + }
31.1018 + *ptr++ = privdefs->tlb_miss;
31.1019 + }
31.1020 + }
31.1021 + if( unmap_user ) {
31.1022 + /* User (un)mapping */
31.1023 + for( i=0; i<npages; i++ ) {
31.1024 + if( *uptr == userdefs->tlb_multihit ) {
31.1025 + unmapping_ok = FALSE;
31.1026 + }
31.1027 + *uptr++ = userdefs->tlb_miss;
31.1028 + }
31.1029 + }
31.1030 + }
31.1031 +
31.1032 + return unmapping_ok;
31.1033 +}
31.1034 +
31.1035 +static void mmu_utlb_insert_entry( int entry )
31.1036 +{
31.1037 + struct utlb_entry *ent = &mmu_utlb[entry];
31.1038 + mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
31.1039 + mem_region_fn_t upage;
31.1040 + sh4addr_t start_addr = ent->vpn & ent->mask;
31.1041 + int npages = get_tlb_size_pages(ent->flags);
31.1042 +
31.1043 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
31.1044 + /* Store queue mappings are a bit different - normal access is fixed to
31.1045 + * the store queue register block, and we only map prefetches through
31.1046 + * the TLB
31.1047 + */
31.1048 + mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
31.1049 +
31.1050 + if( (ent->flags & TLB_USERMODE) == 0 ) {
31.1051 + upage = mmu_user_storequeue_regions->tlb_prot;
31.1052 + } else if( IS_STOREQUEUE_PROTECTED() ) {
31.1053 + upage = &p4_region_storequeue_sqmd;
31.1054 + } else {
31.1055 + upage = page;
31.1056 + }
31.1057 +
31.1058 + } else {
31.1059 +
31.1060 + if( (ent->flags & TLB_USERMODE) == 0 ) {
31.1061 + upage = &mem_region_tlb_protected;
31.1062 + } else {
31.1063 + upage = page;
31.1064 + }
31.1065 +
31.1066 + if( (ent->flags & TLB_WRITABLE) == 0 ) {
31.1067 + page->write_long = (mem_write_fn_t)tlb_protected_write;
31.1068 + page->write_word = (mem_write_fn_t)tlb_protected_write;
31.1069 + page->write_byte = (mem_write_fn_t)tlb_protected_write;
31.1070 + page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
31.1071 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
31.1072 + } else if( (ent->flags & TLB_DIRTY) == 0 ) {
31.1073 + page->write_long = (mem_write_fn_t)tlb_initial_write;
31.1074 + page->write_word = (mem_write_fn_t)tlb_initial_write;
31.1075 + page->write_byte = (mem_write_fn_t)tlb_initial_write;
31.1076 + page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
31.1077 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
31.1078 + } else {
31.1079 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
31.1080 + }
31.1081 + }
31.1082 +
31.1083 + mmu_utlb_pages[entry].user_fn = upage;
31.1084 +
31.1085 + /* Is page visible? */
31.1086 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
31.1087 + mmu_utlb_map_pages( page, upage, start_addr, npages );
31.1088 + } else if( IS_SV_ENABLED() ) {
31.1089 + mmu_utlb_map_pages( page, NULL, start_addr, npages );
31.1090 + }
31.1091 +}
31.1092 +
31.1093 +static void mmu_utlb_remove_entry( int entry )
31.1094 +{
31.1095 + int i, j;
31.1096 + struct utlb_entry *ent = &mmu_utlb[entry];
31.1097 + sh4addr_t start_addr = ent->vpn&ent->mask;
31.1098 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
31.1099 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
31.1100 + gboolean unmap_user;
31.1101 + int npages = get_tlb_size_pages(ent->flags);
31.1102 +
31.1103 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
31.1104 + unmap_user = TRUE;
31.1105 + } else if( IS_SV_ENABLED() ) {
31.1106 + unmap_user = FALSE;
31.1107 + } else {
31.1108 + return; // Not mapped
31.1109 + }
31.1110 +
31.1111 + gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
31.1112 +
31.1113 + if( !clean_unmap ) {
31.1114 + mmu_utlb_remap_pages( TRUE, unmap_user, entry );
31.1115 + }
31.1116 +}
31.1117 +
31.1118 +static void mmu_utlb_register_all()
31.1119 +{
31.1120 + int i;
31.1121 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.1122 + if( mmu_utlb[i].flags & TLB_VALID )
31.1123 + mmu_utlb_insert_entry( i );
31.1124 + }
31.1125 +}
31.1126 +
31.1127 static void mmu_invalidate_tlb()
31.1128 {
31.1129 int i;
31.1130 for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
31.1131 mmu_itlb[i].flags &= (~TLB_VALID);
31.1132 }
31.1133 + if( IS_TLB_ENABLED() ) {
31.1134 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.1135 + if( mmu_utlb[i].flags & TLB_VALID ) {
31.1136 + mmu_utlb_remove_entry( i );
31.1137 + }
31.1138 + }
31.1139 + }
31.1140 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
31.1141 mmu_utlb[i].flags &= (~TLB_VALID);
31.1142 }
31.1143 - mmu_utlb_entries = 0;
31.1144 -}
31.1145 -
31.1146 -#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
31.1147 -
31.1148 -int32_t mmu_itlb_addr_read( sh4addr_t addr )
31.1149 -{
31.1150 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1151 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
31.1152 -}
31.1153 -int32_t mmu_itlb_data_read( sh4addr_t addr )
31.1154 -{
31.1155 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1156 - return (ent->ppn & 0x1FFFFC00) | ent->flags;
31.1157 -}
31.1158 -
31.1159 -void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
31.1160 -{
31.1161 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1162 - ent->vpn = val & 0xFFFFFC00;
31.1163 - ent->asid = val & 0x000000FF;
31.1164 - ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
31.1165 -}
31.1166 -
31.1167 -void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
31.1168 -{
31.1169 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1170 - ent->ppn = val & 0x1FFFFC00;
31.1171 - ent->flags = val & 0x00001DA;
31.1172 - ent->mask = get_mask_for_flags(val);
31.1173 - if( ent->ppn >= 0x1C000000 )
31.1174 - ent->ppn |= 0xE0000000;
31.1175 -}
31.1176 -
31.1177 -#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
31.1178 -#define UTLB_ASSOC(addr) (addr&0x80)
31.1179 -#define UTLB_DATA2(addr) (addr&0x00800000)
31.1180 -
31.1181 -int32_t mmu_utlb_addr_read( sh4addr_t addr )
31.1182 -{
31.1183 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1184 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
31.1185 - ((ent->flags & TLB_DIRTY)<<7);
31.1186 -}
31.1187 -int32_t mmu_utlb_data_read( sh4addr_t addr )
31.1188 -{
31.1189 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1190 - if( UTLB_DATA2(addr) ) {
31.1191 - return ent->pcmcia;
31.1192 - } else {
31.1193 - return (ent->ppn&0x1FFFFC00) | ent->flags;
31.1194 - }
31.1195 -}
31.1196 -
31.1197 -/**
31.1198 - * Find a UTLB entry for the associative TLB write - same as the normal
31.1199 - * lookup but ignores the valid bit.
31.1200 - */
31.1201 -static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
31.1202 -{
31.1203 - int result = -1;
31.1204 - unsigned int i;
31.1205 - for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
31.1206 - if( (mmu_utlb[i].flags & TLB_VALID) &&
31.1207 - ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
31.1208 - ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
31.1209 - if( result != -1 ) {
31.1210 - fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
31.1211 - return -2;
31.1212 - }
31.1213 - result = i;
31.1214 - }
31.1215 - }
31.1216 - return result;
31.1217 -}
31.1218 -
31.1219 -/**
31.1220 - * Find a ITLB entry for the associative TLB write - same as the normal
31.1221 - * lookup but ignores the valid bit.
31.1222 - */
31.1223 -static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
31.1224 -{
31.1225 - int result = -1;
31.1226 - unsigned int i;
31.1227 - for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
31.1228 - if( (mmu_itlb[i].flags & TLB_VALID) &&
31.1229 - ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
31.1230 - ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
31.1231 - if( result != -1 ) {
31.1232 - return -2;
31.1233 - }
31.1234 - result = i;
31.1235 - }
31.1236 - }
31.1237 - return result;
31.1238 -}
31.1239 -
31.1240 -void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
31.1241 -{
31.1242 - if( UTLB_ASSOC(addr) ) {
31.1243 - int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
31.1244 - if( utlb >= 0 ) {
31.1245 - struct utlb_entry *ent = &mmu_utlb[utlb];
31.1246 - uint32_t old_flags = ent->flags;
31.1247 - ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
31.1248 - ent->flags |= (val & TLB_VALID);
31.1249 - ent->flags |= ((val & 0x200)>>7);
31.1250 - if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
31.1251 - mmu_utlb_remove_entry( utlb );
31.1252 - } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
31.1253 - mmu_utlb_insert_entry( utlb );
31.1254 - }
31.1255 - }
31.1256 -
31.1257 - int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
31.1258 - if( itlb >= 0 ) {
31.1259 - struct itlb_entry *ent = &mmu_itlb[itlb];
31.1260 - ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
31.1261 - }
31.1262 -
31.1263 - if( itlb == -2 || utlb == -2 ) {
31.1264 - MMU_TLB_MULTI_HIT_ERROR(addr);
31.1265 - return;
31.1266 - }
31.1267 - } else {
31.1268 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1269 - if( ent->flags & TLB_VALID )
31.1270 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
31.1271 - ent->vpn = (val & 0xFFFFFC00);
31.1272 - ent->asid = (val & 0xFF);
31.1273 - ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
31.1274 - ent->flags |= (val & TLB_VALID);
31.1275 - ent->flags |= ((val & 0x200)>>7);
31.1276 - if( ent->flags & TLB_VALID )
31.1277 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
31.1278 - }
31.1279 -}
31.1280 -
31.1281 -void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
31.1282 -{
31.1283 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1284 - if( UTLB_DATA2(addr) ) {
31.1285 - ent->pcmcia = val & 0x0000000F;
31.1286 - } else {
31.1287 - if( ent->flags & TLB_VALID )
31.1288 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
31.1289 - ent->ppn = (val & 0x1FFFFC00);
31.1290 - ent->flags = (val & 0x000001FF);
31.1291 - ent->mask = get_mask_for_flags(val);
31.1292 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
31.1293 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
31.1294 - if( ent->flags & TLB_VALID )
31.1295 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
31.1296 - }
31.1297 -}
31.1298 -
31.1299 -/* Cache access - not implemented */
31.1300 -
31.1301 -int32_t mmu_icache_addr_read( sh4addr_t addr )
31.1302 -{
31.1303 - return 0; // not implemented
31.1304 -}
31.1305 -int32_t mmu_icache_data_read( sh4addr_t addr )
31.1306 -{
31.1307 - return 0; // not implemented
31.1308 -}
31.1309 -int32_t mmu_ocache_addr_read( sh4addr_t addr )
31.1310 -{
31.1311 - return 0; // not implemented
31.1312 -}
31.1313 -int32_t mmu_ocache_data_read( sh4addr_t addr )
31.1314 -{
31.1315 - return 0; // not implemented
31.1316 -}
31.1317 -
31.1318 -void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
31.1319 -{
31.1320 -}
31.1321 -
31.1322 -void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
31.1323 -{
31.1324 -}
31.1325 -
31.1326 -void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
31.1327 -{
31.1328 -}
31.1329 -
31.1330 -void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
31.1331 -{
31.1332 }
31.1333
31.1334 /******************************************************************************/
31.1335 @@ -639,9 +842,22 @@
31.1336 /******************************************************************************/
31.1337
31.1338 /**
31.1339 - * The translations are excessively complicated, but unfortunately it's a
31.1340 - * complicated system. TODO: make this not be painfully slow.
31.1341 + * Translate a 32-bit address into a UTLB entry number. Does not check for
31.1342 + * page protection etc.
31.1343 + * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
31.1344 */
31.1345 +int mmu_utlb_entry_for_vpn( uint32_t vpn )
31.1346 +{
31.1347 + mem_region_fn_t fn = sh4_address_space[vpn>>12];
31.1348 + if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
31.1349 + return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
31.1350 + } else if( fn == &mem_region_tlb_multihit ) {
31.1351 + return -2;
31.1352 + } else {
31.1353 + return -1;
31.1354 + }
31.1355 +}
31.1356 +
31.1357
31.1358 /**
31.1359 * Perform the actual utlb lookup w/ asid matching.
31.1360 @@ -763,7 +979,7 @@
31.1361 }
31.1362
31.1363 if( result == -1 ) {
31.1364 - int utlbEntry = mmu_utlb_sorted_find( vpn );
31.1365 + int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
31.1366 if( utlbEntry < 0 ) {
31.1367 return utlbEntry;
31.1368 } else {
31.1369 @@ -824,130 +1040,6 @@
31.1370 return result;
31.1371 }
31.1372
31.1373 -#ifdef HAVE_FRAME_ADDRESS
31.1374 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
31.1375 -#else
31.1376 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
31.1377 -#endif
31.1378 -{
31.1379 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
31.1380 - if( addr & 0x80000000 ) {
31.1381 - if( IS_SH4_PRIVMODE() ) {
31.1382 - if( addr >= 0xE0000000 ) {
31.1383 - return addr; /* P4 - passthrough */
31.1384 - } else if( addr < 0xC0000000 ) {
31.1385 - /* P1, P2 regions are pass-through (no translation) */
31.1386 - return VMA_TO_EXT_ADDR(addr);
31.1387 - }
31.1388 - } else {
31.1389 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
31.1390 - ((mmucr&MMUCR_SQMD) == 0) ) {
31.1391 - /* Conditional user-mode access to the store-queue (no translation) */
31.1392 - return addr;
31.1393 - }
31.1394 - MMU_READ_ADDR_ERROR();
31.1395 - RETURN_VIA(exc);
31.1396 - }
31.1397 - }
31.1398 -
31.1399 - if( (mmucr & MMUCR_AT) == 0 ) {
31.1400 - return VMA_TO_EXT_ADDR(addr);
31.1401 - }
31.1402 -
31.1403 - /* If we get this far, translation is required */
31.1404 - int entryNo;
31.1405 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
31.1406 - entryNo = mmu_utlb_sorted_find( addr );
31.1407 - } else {
31.1408 - entryNo = mmu_utlb_lookup_vpn( addr );
31.1409 - }
31.1410 -
31.1411 - switch(entryNo) {
31.1412 - case -1:
31.1413 - MMU_TLB_READ_MISS_ERROR(addr);
31.1414 - RETURN_VIA(exc);
31.1415 - case -2:
31.1416 - MMU_TLB_MULTI_HIT_ERROR(addr);
31.1417 - RETURN_VIA(exc);
31.1418 - default:
31.1419 - if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
31.1420 - !IS_SH4_PRIVMODE() ) {
31.1421 - /* protection violation */
31.1422 - MMU_TLB_READ_PROT_ERROR(addr);
31.1423 - RETURN_VIA(exc);
31.1424 - }
31.1425 -
31.1426 - /* finally generate the target address */
31.1427 - return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
31.1428 - (addr & (~mmu_utlb[entryNo].mask));
31.1429 - }
31.1430 -}
31.1431 -
31.1432 -#ifdef HAVE_FRAME_ADDRESS
31.1433 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
31.1434 -#else
31.1435 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
31.1436 -#endif
31.1437 -{
31.1438 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
31.1439 - if( addr & 0x80000000 ) {
31.1440 - if( IS_SH4_PRIVMODE() ) {
31.1441 - if( addr >= 0xE0000000 ) {
31.1442 - return addr; /* P4 - passthrough */
31.1443 - } else if( addr < 0xC0000000 ) {
31.1444 - /* P1, P2 regions are pass-through (no translation) */
31.1445 - return VMA_TO_EXT_ADDR(addr);
31.1446 - }
31.1447 - } else {
31.1448 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
31.1449 - ((mmucr&MMUCR_SQMD) == 0) ) {
31.1450 - /* Conditional user-mode access to the store-queue (no translation) */
31.1451 - return addr;
31.1452 - }
31.1453 - MMU_WRITE_ADDR_ERROR();
31.1454 - RETURN_VIA(exc);
31.1455 - }
31.1456 - }
31.1457 -
31.1458 - if( (mmucr & MMUCR_AT) == 0 ) {
31.1459 - return VMA_TO_EXT_ADDR(addr);
31.1460 - }
31.1461 -
31.1462 - /* If we get this far, translation is required */
31.1463 - int entryNo;
31.1464 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
31.1465 - entryNo = mmu_utlb_sorted_find( addr );
31.1466 - } else {
31.1467 - entryNo = mmu_utlb_lookup_vpn( addr );
31.1468 - }
31.1469 -
31.1470 - switch(entryNo) {
31.1471 - case -1:
31.1472 - MMU_TLB_WRITE_MISS_ERROR(addr);
31.1473 - RETURN_VIA(exc);
31.1474 - case -2:
31.1475 - MMU_TLB_MULTI_HIT_ERROR(addr);
31.1476 - RETURN_VIA(exc);
31.1477 - default:
31.1478 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
31.1479 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
31.1480 - /* protection violation */
31.1481 - MMU_TLB_WRITE_PROT_ERROR(addr);
31.1482 - RETURN_VIA(exc);
31.1483 - }
31.1484 -
31.1485 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
31.1486 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
31.1487 - RETURN_VIA(exc);
31.1488 - }
31.1489 -
31.1490 - /* finally generate the target address */
31.1491 - sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
31.1492 - (addr & (~mmu_utlb[entryNo].mask));
31.1493 - return pma;
31.1494 - }
31.1495 -}
31.1496 -
31.1497 /**
31.1498 * Update the icache for an untranslated address
31.1499 */
31.1500 @@ -958,13 +1050,13 @@
31.1501 sh4_icache.page_vma = addr & 0xFF000000;
31.1502 sh4_icache.page_ppa = 0x0C000000;
31.1503 sh4_icache.mask = 0xFF000000;
31.1504 - sh4_icache.page = sh4_main_ram;
31.1505 + sh4_icache.page = dc_main_ram;
31.1506 } else if( (addr & 0x1FE00000) == 0 ) {
31.1507 /* BIOS ROM */
31.1508 sh4_icache.page_vma = addr & 0xFFE00000;
31.1509 sh4_icache.page_ppa = 0;
31.1510 sh4_icache.mask = 0xFFE00000;
31.1511 - sh4_icache.page = mem_get_region(0);
31.1512 + sh4_icache.page = dc_boot_rom;
31.1513 } else {
31.1514 /* not supported */
31.1515 sh4_icache.page_vma = -1;
31.1516 @@ -993,7 +1085,7 @@
31.1517 mmu_update_icache_phys(addr);
31.1518 return TRUE;
31.1519 } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
31.1520 - MMU_READ_ADDR_ERROR();
31.1521 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
31.1522 return FALSE;
31.1523 }
31.1524 }
31.1525 @@ -1010,7 +1102,7 @@
31.1526 entryNo = mmu_itlb_lookup_vpn( addr );
31.1527 } else {
31.1528 if( addr & 0x80000000 ) {
31.1529 - MMU_READ_ADDR_ERROR();
31.1530 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
31.1531 return FALSE;
31.1532 }
31.1533
31.1534 @@ -1023,17 +1115,17 @@
31.1535 entryNo = mmu_itlb_lookup_vpn_asid( addr );
31.1536
31.1537 if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
31.1538 - MMU_TLB_READ_PROT_ERROR(addr);
31.1539 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
31.1540 return FALSE;
31.1541 }
31.1542 }
31.1543
31.1544 switch(entryNo) {
31.1545 case -1:
31.1546 - MMU_TLB_READ_MISS_ERROR(addr);
31.1547 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
31.1548 return FALSE;
31.1549 case -2:
31.1550 - MMU_TLB_MULTI_HIT_ERROR(addr);
31.1551 + RAISE_TLB_MULTIHIT_ERROR(addr);
31.1552 return FALSE;
31.1553 default:
31.1554 sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
31.1555 @@ -1083,55 +1175,365 @@
31.1556 }
31.1557 }
31.1558
31.1559 -void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
31.1560 +/********************** TLB Direct-Access Regions ***************************/
31.1561 +#ifdef HAVE_FRAME_ADDRESS
31.1562 +#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
31.1563 +#else
31.1564 +#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
31.1565 +#endif
31.1566 +
31.1567 +
31.1568 +#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
31.1569 +
31.1570 +int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
31.1571 {
31.1572 - int queue = (addr&0x20)>>2;
31.1573 - uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
31.1574 - sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
31.1575 - sh4addr_t target = (addr&0x03FFFFE0) | hi;
31.1576 - mem_copy_to_sh4( target, src, 32 );
31.1577 -}
31.1578 -
31.1579 -gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
31.1580 -{
31.1581 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
31.1582 - int queue = (addr&0x20)>>2;
31.1583 - sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
31.1584 - sh4addr_t target;
31.1585 - /* Store queue operation */
31.1586 -
31.1587 - int entryNo;
31.1588 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
31.1589 - entryNo = mmu_utlb_lookup_vpn_asid( addr );
31.1590 - } else {
31.1591 - entryNo = mmu_utlb_lookup_vpn( addr );
31.1592 - }
31.1593 - switch(entryNo) {
31.1594 - case -1:
31.1595 - MMU_TLB_WRITE_MISS_ERROR(addr);
31.1596 - return FALSE;
31.1597 - case -2:
31.1598 - MMU_TLB_MULTI_HIT_ERROR(addr);
31.1599 - return FALSE;
31.1600 - default:
31.1601 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
31.1602 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
31.1603 - /* protection violation */
31.1604 - MMU_TLB_WRITE_PROT_ERROR(addr);
31.1605 - return FALSE;
31.1606 - }
31.1607 -
31.1608 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
31.1609 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
31.1610 - return FALSE;
31.1611 - }
31.1612 -
31.1613 - /* finally generate the target address */
31.1614 - target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
31.1615 - (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
31.1616 - }
31.1617 -
31.1618 - mem_copy_to_sh4( target, src, 32 );
31.1619 - return TRUE;
31.1620 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1621 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
31.1622 }
31.1623
31.1624 +void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
31.1625 +{
31.1626 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1627 + ent->vpn = val & 0xFFFFFC00;
31.1628 + ent->asid = val & 0x000000FF;
31.1629 + ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
31.1630 +}
31.1631 +
31.1632 +int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
31.1633 +{
31.1634 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1635 + return (ent->ppn & 0x1FFFFC00) | ent->flags;
31.1636 +}
31.1637 +
31.1638 +void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
31.1639 +{
31.1640 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
31.1641 + ent->ppn = val & 0x1FFFFC00;
31.1642 + ent->flags = val & 0x00001DA;
31.1643 + ent->mask = get_tlb_size_mask(val);
31.1644 + if( ent->ppn >= 0x1C000000 )
31.1645 + ent->ppn |= 0xE0000000;
31.1646 +}
31.1647 +
31.1648 +#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
31.1649 +#define UTLB_ASSOC(addr) (addr&0x80)
31.1650 +#define UTLB_DATA2(addr) (addr&0x00800000)
31.1651 +
31.1652 +int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
31.1653 +{
31.1654 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1655 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
31.1656 + ((ent->flags & TLB_DIRTY)<<7);
31.1657 +}
31.1658 +int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
31.1659 +{
31.1660 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1661 + if( UTLB_DATA2(addr) ) {
31.1662 + return ent->pcmcia;
31.1663 + } else {
31.1664 + return (ent->ppn&0x1FFFFC00) | ent->flags;
31.1665 + }
31.1666 +}
31.1667 +
31.1668 +/**
31.1669 + * Find a UTLB entry for the associative TLB write - same as the normal
31.1670 + * lookup but ignores the valid bit.
31.1671 + */
31.1672 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
31.1673 +{
31.1674 + int result = -1;
31.1675 + unsigned int i;
31.1676 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
31.1677 + if( (mmu_utlb[i].flags & TLB_VALID) &&
31.1678 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
31.1679 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
31.1680 + if( result != -1 ) {
31.1681 + fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
31.1682 + return -2;
31.1683 + }
31.1684 + result = i;
31.1685 + }
31.1686 + }
31.1687 + return result;
31.1688 +}
31.1689 +
31.1690 +/**
31.1691 + * Find a ITLB entry for the associative TLB write - same as the normal
31.1692 + * lookup but ignores the valid bit.
31.1693 + */
31.1694 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
31.1695 +{
31.1696 + int result = -1;
31.1697 + unsigned int i;
31.1698 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
31.1699 + if( (mmu_itlb[i].flags & TLB_VALID) &&
31.1700 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
31.1701 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
31.1702 + if( result != -1 ) {
31.1703 + return -2;
31.1704 + }
31.1705 + result = i;
31.1706 + }
31.1707 + }
31.1708 + return result;
31.1709 +}
31.1710 +
31.1711 +void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
31.1712 +{
31.1713 + if( UTLB_ASSOC(addr) ) {
31.1714 + int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
31.1715 + if( utlb >= 0 ) {
31.1716 + struct utlb_entry *ent = &mmu_utlb[utlb];
31.1717 + uint32_t old_flags = ent->flags;
31.1718 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
31.1719 + ent->flags |= (val & TLB_VALID);
31.1720 + ent->flags |= ((val & 0x200)>>7);
31.1721 + if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
31.1722 + if( old_flags & TLB_VALID )
31.1723 + mmu_utlb_remove_entry( utlb );
31.1724 + if( ent->flags & TLB_VALID )
31.1725 + mmu_utlb_insert_entry( utlb );
31.1726 + }
31.1727 + }
31.1728 +
31.1729 + int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
31.1730 + if( itlb >= 0 ) {
31.1731 + struct itlb_entry *ent = &mmu_itlb[itlb];
31.1732 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
31.1733 + }
31.1734 +
31.1735 + if( itlb == -2 || utlb == -2 ) {
31.1736 + RAISE_TLB_MULTIHIT_ERROR(addr);
31.1737 + EXCEPTION_EXIT();
31.1738 + return;
31.1739 + }
31.1740 + } else {
31.1741 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1742 + if( ent->flags & TLB_VALID )
31.1743 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
31.1744 + ent->vpn = (val & 0xFFFFFC00);
31.1745 + ent->asid = (val & 0xFF);
31.1746 + ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
31.1747 + ent->flags |= (val & TLB_VALID);
31.1748 + ent->flags |= ((val & 0x200)>>7);
31.1749 + if( ent->flags & TLB_VALID )
31.1750 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
31.1751 + }
31.1752 +}
31.1753 +
31.1754 +void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
31.1755 +{
31.1756 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
31.1757 + if( UTLB_DATA2(addr) ) {
31.1758 + ent->pcmcia = val & 0x0000000F;
31.1759 + } else {
31.1760 + if( ent->flags & TLB_VALID )
31.1761 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
31.1762 + ent->ppn = (val & 0x1FFFFC00);
31.1763 + ent->flags = (val & 0x000001FF);
31.1764 + ent->mask = get_tlb_size_mask(val);
31.1765 + if( ent->flags & TLB_VALID )
31.1766 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
31.1767 + }
31.1768 +}
31.1769 +
31.1770 +struct mem_region_fn p4_region_itlb_addr = {
31.1771 + mmu_itlb_addr_read, mmu_itlb_addr_write,
31.1772 + mmu_itlb_addr_read, mmu_itlb_addr_write,
31.1773 + mmu_itlb_addr_read, mmu_itlb_addr_write,
31.1774 + unmapped_read_burst, unmapped_write_burst,
31.1775 + unmapped_prefetch };
31.1776 +struct mem_region_fn p4_region_itlb_data = {
31.1777 + mmu_itlb_data_read, mmu_itlb_data_write,
31.1778 + mmu_itlb_data_read, mmu_itlb_data_write,
31.1779 + mmu_itlb_data_read, mmu_itlb_data_write,
31.1780 + unmapped_read_burst, unmapped_write_burst,
31.1781 + unmapped_prefetch };
31.1782 +struct mem_region_fn p4_region_utlb_addr = {
31.1783 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
31.1784 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
31.1785 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
31.1786 + unmapped_read_burst, unmapped_write_burst,
31.1787 + unmapped_prefetch };
31.1788 +struct mem_region_fn p4_region_utlb_data = {
31.1789 + mmu_utlb_data_read, mmu_utlb_data_write,
31.1790 + mmu_utlb_data_read, mmu_utlb_data_write,
31.1791 + mmu_utlb_data_read, mmu_utlb_data_write,
31.1792 + unmapped_read_burst, unmapped_write_burst,
31.1793 + unmapped_prefetch };
31.1794 +
31.1795 +/********************** Error regions **************************/
31.1796 +
31.1797 +static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
31.1798 +{
31.1799 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
31.1800 + EXCEPTION_EXIT();
31.1801 +}
31.1802 +
31.1803 +static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
31.1804 +{
31.1805 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
31.1806 + EXCEPTION_EXIT();
31.1807 +}
31.1808 +
31.1809 +static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
31.1810 +{
31.1811 + RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
31.1812 + EXCEPTION_EXIT();
31.1813 +}
31.1814 +
31.1815 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
31.1816 +{
31.1817 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
31.1818 + EXCEPTION_EXIT();
31.1819 +}
31.1820 +
31.1821 +static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
31.1822 +{
31.1823 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
31.1824 + EXCEPTION_EXIT();
31.1825 +}
31.1826 +
31.1827 +static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
31.1828 +{
31.1829 + RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
31.1830 + EXCEPTION_EXIT();
31.1831 +}
31.1832 +
31.1833 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
31.1834 +{
31.1835 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
31.1836 + EXCEPTION_EXIT();
31.1837 +}
31.1838 +
31.1839 +static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
31.1840 +{
31.1841 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
31.1842 + EXCEPTION_EXIT();
31.1843 +}
31.1844 +
31.1845 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
31.1846 +{
31.1847 + RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
31.1848 + EXCEPTION_EXIT();
31.1849 +}
31.1850 +
31.1851 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
31.1852 +{
31.1853 + RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
31.1854 + EXCEPTION_EXIT();
31.1855 +}
31.1856 +
31.1857 +static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
31.1858 +{
31.1859 + sh4_raise_tlb_multihit(addr);
31.1860 + EXCEPTION_EXIT();
31.1861 +}
31.1862 +
31.1863 +static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
31.1864 +{
31.1865 + sh4_raise_tlb_multihit(addr);
31.1866 + EXCEPTION_EXIT();
31.1867 +}
31.1868 +static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
31.1869 +{
31.1870 + sh4_raise_tlb_multihit(addr);
31.1871 + EXCEPTION_EXIT();
31.1872 +}
31.1873 +
31.1874 +/**
31.1875 + * Note: Per sec 4.6.4 of the SH7750 manual, SQ
31.1876 + */
31.1877 +struct mem_region_fn mem_region_address_error = {
31.1878 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1879 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1880 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1881 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
31.1882 + unmapped_prefetch };
31.1883 +
31.1884 +struct mem_region_fn mem_region_tlb_miss = {
31.1885 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
31.1886 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
31.1887 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
31.1888 + (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
31.1889 + unmapped_prefetch };
31.1890 +
31.1891 +struct mem_region_fn mem_region_tlb_protected = {
31.1892 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
31.1893 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
31.1894 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
31.1895 + (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
31.1896 + unmapped_prefetch };
31.1897 +
31.1898 +struct mem_region_fn mem_region_tlb_multihit = {
31.1899 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
31.1900 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
31.1901 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
31.1902 + (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
31.1903 + (mem_prefetch_fn_t)tlb_multi_hit_read };
31.1904 +
31.1905 +
31.1906 +/* Store-queue regions */
31.1907 +/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
31.1908 + * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
31.1909 + * some cases), in contrast to the ordinary fields above.
31.1910 + *
31.1911 + * There is probably a simpler way to do this.
31.1912 + */
31.1913 +
31.1914 +struct mem_region_fn p4_region_storequeue = {
31.1915 + ccn_storequeue_read_long, ccn_storequeue_write_long,
31.1916 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
31.1917 + unmapped_read_long, unmapped_write_long,
31.1918 + unmapped_read_burst, unmapped_write_burst,
31.1919 + ccn_storequeue_prefetch };
31.1920 +
31.1921 +struct mem_region_fn p4_region_storequeue_miss = {
31.1922 + ccn_storequeue_read_long, ccn_storequeue_write_long,
31.1923 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
31.1924 + unmapped_read_long, unmapped_write_long,
31.1925 + unmapped_read_burst, unmapped_write_burst,
31.1926 + (mem_prefetch_fn_t)tlb_miss_read };
31.1927 +
31.1928 +struct mem_region_fn p4_region_storequeue_multihit = {
31.1929 + ccn_storequeue_read_long, ccn_storequeue_write_long,
31.1930 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
31.1931 + unmapped_read_long, unmapped_write_long,
31.1932 + unmapped_read_burst, unmapped_write_burst,
31.1933 + (mem_prefetch_fn_t)tlb_multi_hit_read };
31.1934 +
31.1935 +struct mem_region_fn p4_region_storequeue_protected = {
31.1936 + ccn_storequeue_read_long, ccn_storequeue_write_long,
31.1937 + unmapped_read_long, unmapped_write_long,
31.1938 + unmapped_read_long, unmapped_write_long,
31.1939 + unmapped_read_burst, unmapped_write_burst,
31.1940 + (mem_prefetch_fn_t)tlb_protected_read };
31.1941 +
31.1942 +struct mem_region_fn p4_region_storequeue_sqmd = {
31.1943 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1944 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1945 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1946 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
31.1947 + (mem_prefetch_fn_t)address_error_read };
31.1948 +
31.1949 +struct mem_region_fn p4_region_storequeue_sqmd_miss = {
31.1950 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1951 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1952 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1953 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
31.1954 + (mem_prefetch_fn_t)tlb_miss_read };
31.1955 +
31.1956 +struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
31.1957 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1958 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1959 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1960 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
31.1961 + (mem_prefetch_fn_t)tlb_multi_hit_read };
31.1962 +
31.1963 +struct mem_region_fn p4_region_storequeue_sqmd_protected = {
31.1964 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1965 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1966 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
31.1967 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
31.1968 + (mem_prefetch_fn_t)tlb_protected_read };
31.1969 +
32.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
32.2 +++ b/src/sh4/mmu.h Tue Jan 13 11:56:28 2009 +0000
32.3 @@ -0,0 +1,159 @@
32.4 +/**
32.5 + * $Id$
32.6 + *
32.7 + * MMU/TLB definitions.
32.8 + *
32.9 + * Copyright (c) 2005 Nathan Keynes.
32.10 + *
32.11 + * This program is free software; you can redistribute it and/or modify
32.12 + * it under the terms of the GNU General Public License as published by
32.13 + * the Free Software Foundation; either version 2 of the License, or
32.14 + * (at your option) any later version.
32.15 + *
32.16 + * This program is distributed in the hope that it will be useful,
32.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
32.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32.19 + * GNU General Public License for more details.
32.20 + */
32.21 +
32.22 +
32.23 +#ifndef lxdream_sh4_mmu_H
32.24 +#define lxdream_sh4_mmu_H 1
32.25 +
32.26 +#include "lxdream.h"
32.27 +
32.28 +#ifdef __cplusplus
32.29 +extern "C" {
32.30 +#endif
32.31 +
32.32 +#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
32.33 +
32.34 +/************************** UTLB/ITLB Definitions ***************************/
32.35 +/* mmucr register bits */
32.36 +#define MMUCR_AT 0x00000001 /* Address Translation enabled */
32.37 +#define MMUCR_TI 0x00000004 /* TLB invalidate (always read as 0) */
32.38 +#define MMUCR_SV 0x00000100 /* Single Virtual mode=1 / multiple virtual=0 */
32.39 +#define MMUCR_SQMD 0x00000200 /* Store queue mode bit (0=user, 1=priv only) */
32.40 +#define MMUCR_URC 0x0000FC00 /* UTLB access counter */
32.41 +#define MMUCR_URB 0x00FC0000 /* UTLB entry boundary */
32.42 +#define MMUCR_LRUI 0xFC000000 /* Least recently used ITLB */
32.43 +#define MMUCR_MASK 0xFCFCFF05
32.44 +#define MMUCR_RMASK 0xFCFCFF01 /* Read mask */
32.45 +
32.46 +#define IS_TLB_ENABLED() (MMIO_READ(MMU, MMUCR)&MMUCR_AT)
32.47 +#define IS_SV_ENABLED() (MMIO_READ(MMU,MMUCR)&MMUCR_SV)
32.48 +
32.49 +#define ITLB_ENTRY_COUNT 4
32.50 +#define UTLB_ENTRY_COUNT 64
32.51 +
32.52 +/* Entry address */
32.53 +#define TLB_VALID 0x00000100
32.54 +#define TLB_USERMODE 0x00000040
32.55 +#define TLB_WRITABLE 0x00000020
32.56 +#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
32.57 +#define TLB_SIZE_MASK 0x00000090
32.58 +#define TLB_SIZE_1K 0x00000000
32.59 +#define TLB_SIZE_4K 0x00000010
32.60 +#define TLB_SIZE_64K 0x00000080
32.61 +#define TLB_SIZE_1M 0x00000090
32.62 +#define TLB_CACHEABLE 0x00000008
32.63 +#define TLB_DIRTY 0x00000004
32.64 +#define TLB_SHARE 0x00000002
32.65 +#define TLB_WRITETHRU 0x00000001
32.66 +
32.67 +#define MASK_1K 0xFFFFFC00
32.68 +#define MASK_4K 0xFFFFF000
32.69 +#define MASK_64K 0xFFFF0000
32.70 +#define MASK_1M 0xFFF00000
32.71 +
32.72 +struct itlb_entry {
32.73 + sh4addr_t vpn; // Virtual Page Number
32.74 + uint32_t asid; // Process ID
32.75 + uint32_t mask;
32.76 + sh4addr_t ppn; // Physical Page Number
32.77 + uint32_t flags;
32.78 +};
32.79 +
32.80 +struct utlb_entry {
32.81 + sh4addr_t vpn; // Virtual Page Number
32.82 + uint32_t mask; // Page size mask
32.83 + uint32_t asid; // Process ID
32.84 + sh4addr_t ppn; // Physical Page Number
32.85 + uint32_t flags;
32.86 + uint32_t pcmcia; // extra pcmcia data - not used in this implementation
32.87 +};
32.88 +
32.89 +#define TLB_FUNC_SIZE 48
32.90 +
32.91 +struct utlb_page_entry {
32.92 + struct mem_region_fn fn;
32.93 + struct mem_region_fn *user_fn;
32.94 + mem_region_fn_t target;
32.95 + unsigned char code[TLB_FUNC_SIZE*9];
32.96 +};
32.97 +
32.98 +struct utlb_1k_entry {
32.99 + struct mem_region_fn fn;
32.100 + struct mem_region_fn user_fn;
32.101 + struct mem_region_fn *subpages[4];
32.102 + struct mem_region_fn *user_subpages[4];
32.103 + unsigned char code[TLB_FUNC_SIZE*18];
32.104 +};
32.105 +
32.106 +struct utlb_default_regions {
32.107 + mem_region_fn_t tlb_miss;
32.108 + mem_region_fn_t tlb_prot;
32.109 + mem_region_fn_t tlb_multihit;
32.110 +};
32.111 +
32.112 +
32.113 +void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable );
32.114 +void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *ent );
32.115 +void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page );
32.116 +
32.117 +extern uint32_t mmu_urc;
32.118 +extern uint32_t mmu_urb;
32.119 +
32.120 +/** Primary SH4 address space (privileged and user access)
32.121 + * Page map (4KB) of the entire 32-bit address space
32.122 + * Note: only callable from the SH4 cores as it depends on the caller setting
32.123 + * up an appropriate exception environment.
32.124 + **/
32.125 +extern struct mem_region_fn **sh4_address_space;
32.126 +extern struct mem_region_fn **sh4_user_address_space;
32.127 +
32.128 +/************ Storequeue/cache functions ***********/
32.129 +void FASTCALL ccn_storequeue_write_long( sh4addr_t addr, uint32_t val );
32.130 +int32_t FASTCALL ccn_storequeue_read_long( sh4addr_t addr );
32.131 +
32.132 +/** Default storequeue prefetch when TLB is disabled */
32.133 +void FASTCALL ccn_storequeue_prefetch( sh4addr_t addr );
32.134 +
32.135 +/** TLB-enabled variant of the storequeue prefetch */
32.136 +void FASTCALL ccn_storequeue_prefetch_tlb( sh4addr_t addr );
32.137 +
32.138 +/** Non-storequeue prefetch */
32.139 +void FASTCALL ccn_prefetch( sh4addr_t addr );
32.140 +
32.141 +/** Non-cached prefetch (ie, no-op) */
32.142 +void FASTCALL ccn_uncached_prefetch( sh4addr_t addr );
32.143 +
32.144 +
32.145 +extern struct mem_region_fn mem_region_address_error;
32.146 +extern struct mem_region_fn mem_region_tlb_miss;
32.147 +extern struct mem_region_fn mem_region_tlb_multihit;
32.148 +extern struct mem_region_fn mem_region_tlb_protected;
32.149 +
32.150 +extern struct mem_region_fn p4_region_storequeue;
32.151 +extern struct mem_region_fn p4_region_storequeue_multihit;
32.152 +extern struct mem_region_fn p4_region_storequeue_miss;
32.153 +extern struct mem_region_fn p4_region_storequeue_protected;
32.154 +extern struct mem_region_fn p4_region_storequeue_sqmd;
32.155 +extern struct mem_region_fn p4_region_storequeue_sqmd_miss;
32.156 +extern struct mem_region_fn p4_region_storequeue_sqmd_multihit;
32.157 +extern struct mem_region_fn p4_region_storequeue_sqmd_protected;
32.158 +
32.159 +#ifdef __cplusplus
32.160 +}
32.161 +#endif
32.162 +#endif /* !lxdream_sh4_mmu_H */
33.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
33.2 +++ b/src/sh4/mmux86.c Tue Jan 13 11:56:28 2009 +0000
33.3 @@ -0,0 +1,128 @@
33.4 +/**
33.5 + * $Id$
33.6 + *
33.7 + * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
33.8 + *
33.9 + * Copyright (c) 2008 Nathan Keynes.
33.10 + *
33.11 + * This program is free software; you can redistribute it and/or modify
33.12 + * it under the terms of the GNU General Public License as published by
33.13 + * the Free Software Foundation; either version 2 of the License, or
33.14 + * (at your option) any later version.
33.15 + *
33.16 + * This program is distributed in the hope that it will be useful,
33.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
33.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33.19 + * GNU General Public License for more details.
33.20 + */
33.21 +
33.22 +#include "lxdream.h"
33.23 +#include "mem.h"
33.24 +#include "sh4/sh4core.h"
33.25 +#include "sh4/sh4mmio.h"
33.26 +#include "sh4/sh4trans.h"
33.27 +#include "sh4/mmu.h"
33.28 +#include "sh4/x86op.h"
33.29 +
33.30 +#if SIZEOF_VOID_P == 8
33.31 +#define ARG1 R_EDI
33.32 +#define ARG2 R_ESI
33.33 +#define DECODE() \
33.34 + MOV_imm64_r32((uintptr_t)ext_address_space, R_EAX); /* movq ptr, %rax */ \
33.35 + REXW(); OP(0x8B); OP(0x0C); OP(0xC8) /* movq [%rax + %rcx*8], %rcx */
33.36 +#else
33.37 +#define ARG1 R_EAX
33.38 +#define ARG2 R_EDX
33.39 +#define DECODE() \
33.40 + MOV_r32disp32x4_r32( R_ECX, (uintptr_t)ext_address_space, R_ECX );
33.41 +#endif
33.42 +
33.43 +void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
33.44 +{
33.45 + uint32_t mask = ent->mask;
33.46 + uint32_t vpn = ent->vpn & mask;
33.47 + uint32_t ppn = ent->ppn & mask;
33.48 + int inc = writable ? 1 : 2;
33.49 + int i;
33.50 +
33.51 + xlat_output = page->code;
33.52 + uint8_t **fn = (uint8_t **)ext_address_space[ppn>>12];
33.53 + uint8_t **out = (uint8_t **)&page->fn;
33.54 +
33.55 + for( i=0; i<9; i+= inc, fn += inc, out += inc ) {
33.56 + *out = xlat_output;
33.57 +#if SIZEOF_VOID_P == 8
33.58 + MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
33.59 + OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
33.60 +#else
33.61 + OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
33.62 +#endif
33.63 + ADD_imm32_r32( ppn-vpn, ARG1 ); // 6
33.64 + if( ent->mask >= 0xFFFFF000 ) {
33.65 + // Maps to a single page, so jump directly there
33.66 + int rel = (*fn - xlat_output);
33.67 + JMP_rel( rel ); // 5
33.68 + } else {
33.69 + MOV_r32_r32( ARG1, R_ECX ); // 2
33.70 + SHR_imm8_r32( 12, R_ECX ); // 3
33.71 + DECODE(); // 14
33.72 + JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
33.73 + }
33.74 + }
33.75 +
33.76 + page->fn.prefetch = unmapped_prefetch; // FIXME
33.77 +}
33.78 +
33.79 +void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
33.80 +{
33.81 + uint32_t mask = ent->mask;
33.82 + uint32_t vpn = ent->vpn & mask;
33.83 + uint32_t ppn = ent->ppn & mask;
33.84 +
33.85 + xlat_output = page->code;
33.86 +
33.87 + memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );
33.88 +
33.89 + /* TESTME: Does a PREF increment the URC counter? */
33.90 + page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
33.91 + ADD_imm32_r32( ppn-vpn, ARG1 );
33.92 + int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
33.93 + JMP_rel( rel );
33.94 +}
33.95 +
33.96 +void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
33.97 +{
33.98 + xlat_output = entry->code;
33.99 + int i;
33.100 + uint8_t **out = (uint8_t **)&entry->fn;
33.101 +
33.102 + for( i=0; i<9; i++, out++ ) {
33.103 + *out = xlat_output;
33.104 + MOV_r32_r32( ARG1, R_ECX );
33.105 + SHR_imm8_r32( 10, R_ECX );
33.106 + AND_imm8s_r32( 0x3, R_ECX );
33.107 +#if SIZEOF_VOID_P == 8
33.108 + MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
33.109 + REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
33.110 +#else
33.111 + MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
33.112 +#endif
33.113 + JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
33.114 + }
33.115 +
33.116 + out = (uint8_t **)&entry->user_fn;
33.117 + for( i=0; i<9; i++, out++ ) {
33.118 + *out = xlat_output;
33.119 + MOV_r32_r32( ARG1, R_ECX );
33.120 + SHR_imm8_r32( 10, R_ECX );
33.121 + AND_imm8s_r32( 0x3, R_ECX );
33.122 +#if SIZEOF_VOID_P == 8
33.123 + MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
33.124 + REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
33.125 +#else
33.126 + MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
33.127 +#endif
33.128 + JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
33.129 + }
33.130 +
33.131 +}
34.1 --- a/src/sh4/pmm.c Mon Dec 15 10:44:56 2008 +0000
34.2 +++ b/src/sh4/pmm.c Tue Jan 13 11:56:28 2009 +0000
34.3 @@ -131,8 +131,8 @@
34.4 }
34.5 }
34.6
34.7 -int32_t mmio_region_PMM_read( uint32_t reg )
34.8 -{
34.9 +MMIO_REGION_READ_FN( PMM, reg )
34.10 +{
34.11 switch( reg & 0x1F ) {
34.12 case 0: return 0; /* not a register */
34.13 case PMCTR1H:
34.14 @@ -150,7 +150,7 @@
34.15 }
34.16 }
34.17
34.18 -void mmio_region_PMM_write( uint32_t reg, uint32_t val )
34.19 +MMIO_REGION_WRITE_FN( PMM, reg, val )
34.20 {
34.21 /* Read-only */
34.22 }
35.1 --- a/src/sh4/scif.c Mon Dec 15 10:44:56 2008 +0000
35.2 +++ b/src/sh4/scif.c Tue Jan 13 11:56:28 2009 +0000
35.3 @@ -455,8 +455,9 @@
35.4 }
35.5 }
35.6
35.7 -int32_t mmio_region_SCIF_read( uint32_t reg )
35.8 +MMIO_REGION_READ_FN( SCIF, reg )
35.9 {
35.10 + reg &= 0xFFF;
35.11 switch( reg ) {
35.12 case SCFRDR2: /* Receive data */
35.13 return SCIF_recvq_dequeue(FALSE);
35.14 @@ -465,9 +466,10 @@
35.15 }
35.16 }
35.17
35.18 -void mmio_region_SCIF_write( uint32_t reg, uint32_t val )
35.19 +MMIO_REGION_WRITE_FN( SCIF, reg, val )
35.20 {
35.21 uint32_t tmp;
35.22 + reg &= 0xFFF;
35.23 switch( reg ) {
35.24 case SCSMR2: /* Serial mode register */
35.25 /* Bit 6 => 0 = 8-bit, 1 = 7-bit
36.1 --- a/src/sh4/sh4.c Mon Dec 15 10:44:56 2008 +0000
36.2 +++ b/src/sh4/sh4.c Tue Jan 13 11:56:28 2009 +0000
36.3 @@ -36,7 +36,7 @@
36.4
36.5 void sh4_init( void );
36.6 void sh4_xlat_init( void );
36.7 -void sh4_reset( void );
36.8 +void sh4_poweron_reset( void );
36.9 void sh4_start( void );
36.10 void sh4_stop( void );
36.11 void sh4_save_state( FILE *f );
36.12 @@ -45,14 +45,14 @@
36.13 uint32_t sh4_run_slice( uint32_t );
36.14 uint32_t sh4_xlat_run_slice( uint32_t );
36.15
36.16 -struct dreamcast_module sh4_module = { "SH4", sh4_init, sh4_reset,
36.17 +struct dreamcast_module sh4_module = { "SH4", sh4_init, sh4_poweron_reset,
36.18 sh4_start, sh4_run_slice, sh4_stop,
36.19 sh4_save_state, sh4_load_state };
36.20
36.21 struct sh4_registers sh4r __attribute__((aligned(16)));
36.22 struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
36.23 int sh4_breakpoint_count = 0;
36.24 -sh4ptr_t sh4_main_ram;
36.25 +
36.26 gboolean sh4_starting = FALSE;
36.27 static gboolean sh4_use_translator = FALSE;
36.28 static jmp_buf sh4_exit_jmp_buf;
36.29 @@ -63,7 +63,6 @@
36.30 {
36.31 // No-op if the translator was not built
36.32 #ifdef SH4_TRANSLATOR
36.33 - xlat_cache_init();
36.34 if( use ) {
36.35 sh4_translate_init();
36.36 }
36.37 @@ -79,10 +78,10 @@
36.38 void sh4_init(void)
36.39 {
36.40 register_io_regions( mmio_list_sh4mmio );
36.41 - sh4_main_ram = mem_get_region_by_name(MEM_REGION_MAIN);
36.42 MMU_init();
36.43 TMU_init();
36.44 - sh4_reset();
36.45 + xlat_cache_init();
36.46 + sh4_poweron_reset();
36.47 #ifdef ENABLE_SH4STATS
36.48 sh4_stats_reset();
36.49 #endif
36.50 @@ -93,15 +92,14 @@
36.51 sh4_starting = TRUE;
36.52 }
36.53
36.54 -void sh4_reset(void)
36.55 +void sh4_poweron_reset(void)
36.56 {
36.57 + /* zero everything out, for the sake of having a consistent state. */
36.58 + memset( &sh4r, 0, sizeof(sh4r) );
36.59 if( sh4_use_translator ) {
36.60 xlat_flush_cache();
36.61 }
36.62
36.63 - /* zero everything out, for the sake of having a consistent state. */
36.64 - memset( &sh4r, 0, sizeof(sh4r) );
36.65 -
36.66 /* Resume running if we were halted */
36.67 sh4r.sh4_state = SH4_STATE_RUNNING;
36.68
36.69 @@ -109,7 +107,7 @@
36.70 sh4r.new_pc= 0xA0000002;
36.71 sh4r.vbr = 0x00000000;
36.72 sh4r.fpscr = 0x00040001;
36.73 - sh4r.sr = 0x700000F0;
36.74 + sh4_write_sr(0x700000F0);
36.75
36.76 /* Mem reset will do this, but if we want to reset _just_ the SH4... */
36.77 MMIO_WRITE( MMU, EXPEVT, EXC_POWER_RESET );
36.78 @@ -117,14 +115,10 @@
36.79 /* Peripheral modules */
36.80 CPG_reset();
36.81 INTC_reset();
36.82 - MMU_reset();
36.83 PMM_reset();
36.84 TMU_reset();
36.85 SCIF_reset();
36.86 -
36.87 -#ifdef ENABLE_SH4STATS
36.88 - sh4_stats_reset();
36.89 -#endif
36.90 + MMU_reset();
36.91 }
36.92
36.93 void sh4_stop(void)
36.94 @@ -168,9 +162,7 @@
36.95 sh4_sleep_run_slice(nanosecs);
36.96 break;
36.97 case CORE_EXIT_FLUSH_ICACHE:
36.98 -#ifdef SH4_TRANSLATOR
36.99 xlat_flush_cache();
36.100 -#endif
36.101 break;
36.102 }
36.103
36.104 @@ -205,27 +197,22 @@
36.105 if( sh4_running ) {
36.106 #ifdef SH4_TRANSLATOR
36.107 if( sh4_use_translator ) {
36.108 - sh4_translate_exit_recover();
36.109 + if( exit_code == CORE_EXIT_EXCEPTION ) {
36.110 + sh4_translate_exception_exit_recover();
36.111 + } else {
36.112 + sh4_translate_exit_recover();
36.113 + }
36.114 }
36.115 #endif
36.116 + if( exit_code != CORE_EXIT_EXCEPTION ) {
36.117 + sh4_finalize_instruction();
36.118 + }
36.119 // longjmp back into sh4_run_slice
36.120 sh4_running = FALSE;
36.121 longjmp(sh4_exit_jmp_buf, exit_code);
36.122 }
36.123 }
36.124
36.125 -void sh4_flush_icache()
36.126 -{
36.127 -#ifdef SH4_TRANSLATOR
36.128 - // FIXME: Special case needs to be generalized
36.129 - if( sh4_use_translator ) {
36.130 - if( sh4_translate_flush_cache() ) {
36.131 - longjmp(sh4_exit_jmp_buf, CORE_EXIT_CONTINUE);
36.132 - }
36.133 - }
36.134 -#endif
36.135 -}
36.136 -
36.137 void sh4_save_state( FILE *f )
36.138 {
36.139 if( sh4_use_translator ) {
36.140 @@ -234,8 +221,9 @@
36.141 sh4r.in_delay_slot = FALSE;
36.142 }
36.143
36.144 - fwrite( &sh4r, sizeof(sh4r), 1, f );
36.145 + fwrite( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
36.146 MMU_save_state( f );
36.147 + CCN_save_state( f );
36.148 PMM_save_state( f );
36.149 INTC_save_state( f );
36.150 TMU_save_state( f );
36.151 @@ -247,15 +235,16 @@
36.152 if( sh4_use_translator ) {
36.153 xlat_flush_cache();
36.154 }
36.155 - fread( &sh4r, sizeof(sh4r), 1, f );
36.156 + fread( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
36.157 + sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
36.158 MMU_load_state( f );
36.159 + CCN_load_state( f );
36.160 PMM_load_state( f );
36.161 INTC_load_state( f );
36.162 TMU_load_state( f );
36.163 return SCIF_load_state( f );
36.164 }
36.165
36.166 -
36.167 void sh4_set_breakpoint( uint32_t pc, breakpoint_type_t type )
36.168 {
36.169 sh4_breakpoints[sh4_breakpoint_count].address = pc;
36.170 @@ -336,6 +325,7 @@
36.171 sh4r.s = (newval&SR_S) ? 1 : 0;
36.172 sh4r.m = (newval&SR_M) ? 1 : 0;
36.173 sh4r.q = (newval&SR_Q) ? 1 : 0;
36.174 + sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
36.175 intc_mask_changed();
36.176 }
36.177
36.178 @@ -345,6 +335,7 @@
36.179 sh4_switch_fr_banks();
36.180 }
36.181 sh4r.fpscr = newval & FPSCR_MASK;
36.182 + sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
36.183 }
36.184
36.185 uint32_t FASTCALL sh4_read_sr( void )
36.186 @@ -358,82 +349,88 @@
36.187 return sh4r.sr;
36.188 }
36.189
36.190 +/**
36.191 + * Raise a CPU reset exception with the specified exception code.
36.192 + */
36.193 +void FASTCALL sh4_raise_reset( int code )
36.194 +{
36.195 + MMIO_WRITE(MMU,EXPEVT,code);
36.196 + sh4r.vbr = 0x00000000;
36.197 + sh4r.pc = 0xA0000000;
36.198 + sh4r.new_pc = sh4r.pc + 2;
36.199 + sh4r.in_delay_slot = 0;
36.200 + sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)&(~SR_FD) );
36.201 +
36.202 + /* Peripheral manual reset (FIXME: incomplete) */
36.203 + INTC_reset();
36.204 + SCIF_reset();
36.205 + MMU_reset();
36.206 +}
36.207
36.208 -
36.209 -#define RAISE( x, v ) do{ \
36.210 - if( sh4r.vbr == 0 ) { \
36.211 - ERROR( "%08X: VBR not initialized while raising exception %03X, halting", sh4r.pc, x ); \
36.212 - sh4_core_exit(CORE_EXIT_HALT); return FALSE; \
36.213 - } else { \
36.214 - sh4r.spc = sh4r.pc; \
36.215 - sh4r.ssr = sh4_read_sr(); \
36.216 - sh4r.sgr = sh4r.r[15]; \
36.217 - MMIO_WRITE(MMU,EXPEVT,x); \
36.218 - sh4r.pc = sh4r.vbr + v; \
36.219 - sh4r.new_pc = sh4r.pc + 2; \
36.220 - sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB ); \
36.221 - if( sh4r.in_delay_slot ) { \
36.222 - sh4r.in_delay_slot = 0; \
36.223 - sh4r.spc -= 2; \
36.224 - } \
36.225 - } \
36.226 - return TRUE; } while(0)
36.227 +void FASTCALL sh4_raise_tlb_multihit( sh4vma_t vpn )
36.228 +{
36.229 + MMIO_WRITE( MMU, TEA, vpn );
36.230 + MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
36.231 + sh4_raise_reset( EXC_TLB_MULTI_HIT );
36.232 +}
36.233
36.234 /**
36.235 * Raise a general CPU exception for the specified exception code.
36.236 * (NOT for TRAPA or TLB exceptions)
36.237 */
36.238 -gboolean FASTCALL sh4_raise_exception( int code )
36.239 +void FASTCALL sh4_raise_exception( int code )
36.240 {
36.241 - RAISE( code, EXV_EXCEPTION );
36.242 -}
36.243 -
36.244 -/**
36.245 - * Raise a CPU reset exception with the specified exception code.
36.246 - */
36.247 -gboolean FASTCALL sh4_raise_reset( int code )
36.248 -{
36.249 - // FIXME: reset modules as per "manual reset"
36.250 - sh4_reset();
36.251 - MMIO_WRITE(MMU,EXPEVT,code);
36.252 - sh4r.vbr = 0;
36.253 - sh4r.pc = 0xA0000000;
36.254 - sh4r.new_pc = sh4r.pc + 2;
36.255 - sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)
36.256 - &(~SR_FD) );
36.257 - return TRUE;
36.258 -}
36.259 -
36.260 -gboolean FASTCALL sh4_raise_trap( int trap )
36.261 -{
36.262 - MMIO_WRITE( MMU, TRA, trap<<2 );
36.263 - RAISE( EXC_TRAP, EXV_EXCEPTION );
36.264 -}
36.265 -
36.266 -gboolean FASTCALL sh4_raise_slot_exception( int normal_code, int slot_code ) {
36.267 - if( sh4r.in_delay_slot ) {
36.268 - return sh4_raise_exception(slot_code);
36.269 + if( sh4r.sr & SR_BL ) {
36.270 + sh4_raise_reset( EXC_MANUAL_RESET );
36.271 } else {
36.272 - return sh4_raise_exception(normal_code);
36.273 + sh4r.spc = sh4r.pc;
36.274 + sh4r.ssr = sh4_read_sr();
36.275 + sh4r.sgr = sh4r.r[15];
36.276 + MMIO_WRITE(MMU,EXPEVT, code);
36.277 + sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
36.278 + sh4r.new_pc = sh4r.pc + 2;
36.279 + sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
36.280 + sh4r.in_delay_slot = 0;
36.281 }
36.282 }
36.283
36.284 -gboolean FASTCALL sh4_raise_tlb_exception( int code )
36.285 +void FASTCALL sh4_raise_trap( int trap )
36.286 {
36.287 - RAISE( code, EXV_TLBMISS );
36.288 + MMIO_WRITE( MMU, TRA, trap<<2 );
36.289 + MMIO_WRITE( MMU, EXPEVT, EXC_TRAP );
36.290 + sh4r.spc = sh4r.pc;
36.291 + sh4r.ssr = sh4_read_sr();
36.292 + sh4r.sgr = sh4r.r[15];
36.293 + sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
36.294 + sh4r.new_pc = sh4r.pc + 2;
36.295 + sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
36.296 + sh4r.in_delay_slot = 0;
36.297 +}
36.298 +
36.299 +void FASTCALL sh4_raise_tlb_exception( int code, sh4vma_t vpn )
36.300 +{
36.301 + MMIO_WRITE( MMU, TEA, vpn );
36.302 + MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
36.303 + MMIO_WRITE( MMU, EXPEVT, code );
36.304 + sh4r.spc = sh4r.pc;
36.305 + sh4r.ssr = sh4_read_sr();
36.306 + sh4r.sgr = sh4r.r[15];
36.307 + sh4r.pc = sh4r.vbr + EXV_TLBMISS;
36.308 + sh4r.new_pc = sh4r.pc + 2;
36.309 + sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
36.310 + sh4r.in_delay_slot = 0;
36.311 }
36.312
36.313 void FASTCALL sh4_accept_interrupt( void )
36.314 {
36.315 uint32_t code = intc_accept_interrupt();
36.316 + MMIO_WRITE( MMU, INTEVT, code );
36.317 sh4r.ssr = sh4_read_sr();
36.318 sh4r.spc = sh4r.pc;
36.319 sh4r.sgr = sh4r.r[15];
36.320 sh4_write_sr( sh4r.ssr|SR_BL|SR_MD|SR_RB );
36.321 - MMIO_WRITE( MMU, INTEVT, code );
36.322 sh4r.pc = sh4r.vbr + 0x600;
36.323 sh4r.new_pc = sh4r.pc + 2;
36.324 - // WARN( "Accepting interrupt %03X, from %08X => %08X", code, sh4r.spc, sh4r.pc );
36.325 }
36.326
36.327 void FASTCALL signsat48( void )
37.1 --- a/src/sh4/sh4.h Mon Dec 15 10:44:56 2008 +0000
37.2 +++ b/src/sh4/sh4.h Tue Jan 13 11:56:28 2009 +0000
37.3 @@ -87,6 +87,9 @@
37.4 * a delay slot (certain rules apply) */
37.5 uint32_t slice_cycle; /* Current nanosecond within the timeslice */
37.6 int sh4_state; /* Current power-on state (one of the SH4_STATE_* values ) */
37.7 +
37.8 + /* Not saved */
37.9 + int xlat_sh4_mode; /* Collection of execution mode flags (derived) from fpscr, sr, etc */
37.10 };
37.11
37.12 extern struct sh4_registers sh4r;
38.1 --- a/src/sh4/sh4core.h Mon Dec 15 10:44:56 2008 +0000
38.2 +++ b/src/sh4/sh4core.h Tue Jan 13 11:56:28 2009 +0000
38.3 @@ -1,10 +1,9 @@
38.4 /**
38.5 * $Id$
38.6 *
38.7 - * This file defines the internal functions exported/used by the SH4 core,
38.8 - * except for disassembly functions defined in sh4dasm.h
38.9 + * This file defines the internal functions used by the SH4 core,
38.10 *
38.11 - * Copyright (c) 2005 Nathan Keynes.
38.12 + * Copyright (c) 2005-2008 Nathan Keynes.
38.13 *
38.14 * This program is free software; you can redistribute it and/or modify
38.15 * it under the terms of the GNU General Public License as published by
38.16 @@ -33,7 +32,6 @@
38.17 /* Breakpoint data structure */
38.18 extern struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
38.19 extern int sh4_breakpoint_count;
38.20 -extern sh4ptr_t sh4_main_ram;
38.21 extern gboolean sh4_starting;
38.22
38.23 /**
38.24 @@ -74,7 +72,7 @@
38.25
38.26
38.27 /**
38.28 - * SH4 vm-exit flag - exit the current block but continue (eg exception handling)
38.29 + * SH4 vm-exit flag - exit the current block but continue normally
38.30 */
38.31 #define CORE_EXIT_CONTINUE 1
38.32
38.33 @@ -101,11 +99,17 @@
38.34 #define CORE_EXIT_SLEEP 5
38.35
38.36 /**
38.37 - * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
38.38 + * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
38.39 * if address translation has changed)
38.40 */
38.41 #define CORE_EXIT_FLUSH_ICACHE 6
38.42
38.43 +/**
38.44 + * SH4 vm-exit flag - exit the current block following a taken exception. sh4r.spc
38.45 + * is fixed up by recovery rather than sh4r.pc.
38.46 + */
38.47 +#define CORE_EXIT_EXCEPTION 7
38.48 +
38.49 typedef uint32_t (*sh4_run_slice_fn)(uint32_t);
38.50
38.51 /* SH4 module functions */
38.52 @@ -145,11 +149,12 @@
38.53 void INTC_reset( void );
38.54 void INTC_save_state( FILE *f );
38.55 int INTC_load_state( FILE *f );
38.56 -void MMU_init( void );
38.57 void MMU_reset( void );
38.58 void MMU_save_state( FILE *f );
38.59 int MMU_load_state( FILE *f );
38.60 void MMU_ldtlb();
38.61 +void CCN_save_state( FILE *f );
38.62 +int CCN_load_state( FILE *f );
38.63 void SCIF_reset( void );
38.64 void SCIF_run_slice( uint32_t );
38.65 void SCIF_save_state( FILE *f );
38.66 @@ -170,6 +175,8 @@
38.67 uint32_t sh4_emulate_run_slice(uint32_t);
38.68
38.69 /* SH4 instruction support methods */
38.70 +mem_region_fn_t FASTCALL sh7750_decode_address( sh4addr_t address );
38.71 +void FASTCALL sh7750_decode_address_copy( sh4addr_t address, mem_region_fn_t result );
38.72 void FASTCALL sh4_sleep( void );
38.73 void FASTCALL sh4_fsca( uint32_t angle, float *fr );
38.74 void FASTCALL sh4_ftrv( float *fv );
38.75 @@ -192,22 +199,6 @@
38.76 */
38.77 gboolean FASTCALL mmu_update_icache( sh4vma_t addr );
38.78
38.79 -/**
38.80 - * Resolve a virtual address through the TLB for a read operation, returning
38.81 - * the resultant P4 or external address. If the resolution fails, the
38.82 - * appropriate MMU exception is raised and the value MMU_VMA_ERROR is returned.
38.83 - * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
38.84 - * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
38.85 - */
38.86 -#ifdef HAVE_FRAME_ADDRESS
38.87 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc );
38.88 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc );
38.89 -#else
38.90 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr );
38.91 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr );
38.92 -#endif
38.93 -sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t addr );
38.94 -
38.95 int64_t FASTCALL sh4_read_quad( sh4addr_t addr );
38.96 int32_t FASTCALL sh4_read_long( sh4addr_t addr );
38.97 int32_t FASTCALL sh4_read_word( sh4addr_t addr );
38.98 @@ -218,7 +209,7 @@
38.99 void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
38.100 int32_t sh4_read_phys_word( sh4addr_t addr );
38.101 void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
38.102 -gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr );
38.103 +void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc );
38.104
38.105 /* SH4 Exceptions */
38.106 #define EXC_POWER_RESET 0x000 /* reset vector */
38.107 @@ -241,20 +232,19 @@
38.108 #define EXV_TLBMISS 0x400 /* TLB-miss exception vector */
38.109 #define EXV_INTERRUPT 0x600 /* External interrupt vector */
38.110
38.111 -gboolean FASTCALL sh4_raise_exception( int );
38.112 -gboolean FASTCALL sh4_raise_reset( int );
38.113 -gboolean FASTCALL sh4_raise_trap( int );
38.114 -gboolean FASTCALL sh4_raise_slot_exception( int, int );
38.115 -gboolean FASTCALL sh4_raise_tlb_exception( int );
38.116 +void FASTCALL sh4_raise_exception( int );
38.117 +void FASTCALL sh4_raise_reset( int );
38.118 +void FASTCALL sh4_raise_trap( int );
38.119 +void FASTCALL sh4_raise_tlb_exception( int, sh4vma_t );
38.120 +void FASTCALL sh4_raise_tlb_multihit( sh4vma_t );
38.121 void FASTCALL sh4_accept_interrupt( void );
38.122
38.123 -#define SIGNEXT4(n) ((((int32_t)(n))<<28)>>28)
38.124 -#define SIGNEXT8(n) ((int32_t)((int8_t)(n)))
38.125 -#define SIGNEXT12(n) ((((int32_t)(n))<<20)>>20)
38.126 -#define SIGNEXT16(n) ((int32_t)((int16_t)(n)))
38.127 -#define SIGNEXT32(n) ((int64_t)((int32_t)(n)))
38.128 -#define SIGNEXT48(n) ((((int64_t)(n))<<16)>>16)
38.129 -#define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
38.130 +/**
38.131 + * Complete the current instruction as part of a core exit. Prevents the
38.132 + * system from being left in an inconsistent state when an exit is
38.133 + * triggered during a memory write.
38.134 + */
38.135 +void sh4_finalize_instruction( void );
38.136
38.137 /* Status Register (SR) bits */
38.138 #define SR_MD 0x40000000 /* Processor mode ( User=0, Privileged=1 ) */
38.139 @@ -297,7 +287,17 @@
38.140 #define FPULf (sh4r.fpul.f)
38.141 #define FPULi (sh4r.fpul.i)
38.142
38.143 -#define SH4_WRITE_STORE_QUEUE(addr,val) sh4r.store_queue[(addr>>2)&0xF] = val;
38.144 +/**************** SH4 internal memory regions *****************/
38.145 +extern struct mem_region_fn p4_region_itlb_addr;
38.146 +extern struct mem_region_fn p4_region_itlb_data;
38.147 +extern struct mem_region_fn p4_region_utlb_addr;
38.148 +extern struct mem_region_fn p4_region_utlb_data;
38.149 +extern struct mem_region_fn p4_region_icache_addr;
38.150 +extern struct mem_region_fn p4_region_icache_data;
38.151 +extern struct mem_region_fn p4_region_ocache_addr;
38.152 +extern struct mem_region_fn p4_region_ocache_data;
38.153 +
38.154 +
38.155
38.156 #ifdef __cplusplus
38.157 }
39.1 --- a/src/sh4/sh4core.in Mon Dec 15 10:44:56 2008 +0000
39.2 +++ b/src/sh4/sh4core.in Tue Jan 13 11:56:28 2009 +0000
39.3 @@ -29,7 +29,7 @@
39.4 #include "sh4/sh4core.h"
39.5 #include "sh4/sh4mmio.h"
39.6 #include "sh4/sh4stat.h"
39.7 -#include "sh4/intc.h"
39.8 +#include "sh4/mmu.h"
39.9
39.10 #define SH4_CALLTRACE 1
39.11
39.12 @@ -102,9 +102,6 @@
39.13
39.14 /********************** SH4 emulation core ****************************/
39.15
39.16 -#define UNDEF(ir) return sh4_raise_slot_exception(EXC_ILLEGAL, EXC_SLOT_ILLEGAL)
39.17 -#define UNIMP(ir) do{ ERROR( "Halted on unimplemented instruction at %08x, opcode = %04x", sh4r.pc, ir ); sh4_core_exit(CORE_EXIT_HALT); return FALSE; }while(0)
39.18 -
39.19 #if(SH4_CALLTRACE == 1)
39.20 #define MAX_CALLSTACK 32
39.21 static struct call_stack {
39.22 @@ -152,72 +149,208 @@
39.23 #define TRACE_RETURN( source, dest )
39.24 #endif
39.25
39.26 -#define CHECKPRIV() if( !IS_SH4_PRIVMODE() ) return sh4_raise_slot_exception( EXC_ILLEGAL, EXC_SLOT_ILLEGAL )
39.27 -#define CHECKRALIGN16(addr) if( (addr)&0x01 ) return sh4_raise_exception( EXC_DATA_ADDR_READ )
39.28 -#define CHECKRALIGN32(addr) if( (addr)&0x03 ) return sh4_raise_exception( EXC_DATA_ADDR_READ )
39.29 -#define CHECKRALIGN64(addr) if( (addr)&0x07 ) return sh4_raise_exception( EXC_DATA_ADDR_READ )
39.30 -#define CHECKWALIGN16(addr) if( (addr)&0x01 ) return sh4_raise_exception( EXC_DATA_ADDR_WRITE )
39.31 -#define CHECKWALIGN32(addr) if( (addr)&0x03 ) return sh4_raise_exception( EXC_DATA_ADDR_WRITE )
39.32 -#define CHECKWALIGN64(addr) if( (addr)&0x07 ) return sh4_raise_exception( EXC_DATA_ADDR_WRITE )
39.33 +static gboolean FASTCALL sh4_raise_slot_exception( int normal_code, int slot_code ) {
39.34 + if( sh4r.in_delay_slot ) {
39.35 + sh4_raise_exception(slot_code);
39.36 + } else {
39.37 + sh4_raise_exception(normal_code);
39.38 + }
39.39 + return TRUE;
39.40 +}
39.41 +
39.42 +
39.43 +#define CHECKPRIV() if( !IS_SH4_PRIVMODE() ) { return sh4_raise_slot_exception( EXC_ILLEGAL, EXC_SLOT_ILLEGAL ); }
39.44 +#define CHECKRALIGN16(addr) if( (addr)&0x01 ) { sh4_raise_exception( EXC_DATA_ADDR_READ ); return TRUE; }
39.45 +#define CHECKRALIGN32(addr) if( (addr)&0x03 ) { sh4_raise_exception( EXC_DATA_ADDR_READ ); return TRUE; }
39.46 +#define CHECKRALIGN64(addr) if( (addr)&0x07 ) { sh4_raise_exception( EXC_DATA_ADDR_READ ); return TRUE; }
39.47 +#define CHECKWALIGN16(addr) if( (addr)&0x01 ) { sh4_raise_exception( EXC_DATA_ADDR_WRITE ); return TRUE; }
39.48 +#define CHECKWALIGN32(addr) if( (addr)&0x03 ) { sh4_raise_exception( EXC_DATA_ADDR_WRITE ); return TRUE; }
39.49 +#define CHECKWALIGN64(addr) if( (addr)&0x07 ) { sh4_raise_exception( EXC_DATA_ADDR_WRITE ); return TRUE; }
39.50
39.51 #define CHECKFPUEN() if( !IS_FPU_ENABLED() ) { if( ir == 0xFFFD ) { UNDEF(ir); } else { return sh4_raise_slot_exception( EXC_FPU_DISABLED, EXC_SLOT_FPU_DISABLED ); } }
39.52 #define CHECKDEST(p) if( (p) == 0 ) { ERROR( "%08X: Branch/jump to NULL, CPU halted", sh4r.pc ); sh4_core_exit(CORE_EXIT_HALT); return FALSE; }
39.53 -#define CHECKSLOTILLEGAL() if(sh4r.in_delay_slot) return sh4_raise_exception(EXC_SLOT_ILLEGAL)
39.54 +#define CHECKSLOTILLEGAL() if(sh4r.in_delay_slot) { sh4_raise_exception(EXC_SLOT_ILLEGAL); return TRUE; }
39.55 +
39.56 +#define ADDRSPACE (IS_SH4_PRIVMODE() ? sh4_address_space : sh4_user_address_space)
39.57 +#define SQADDRSPACE (IS_SH4_PRIVMODE() ? storequeue_address_space : storequeue_user_address_space)
39.58
39.59 #ifdef HAVE_FRAME_ADDRESS
39.60 static FASTCALL __attribute__((noinline)) void *__first_arg(void *a, void *b) { return a; }
39.61 #define INIT_EXCEPTIONS(label) goto *__first_arg(&&fnstart,&&label); fnstart:
39.62 -#define MMU_TRANSLATE_READ( addr ) memtmp = mmu_vma_to_phys_read(addr, &&except )
39.63 -#define MMU_TRANSLATE_WRITE( addr ) memtmp = mmu_vma_to_phys_write(addr, &&except )
39.64 +#define MEM_READ_BYTE( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_byte)((addr), &&except)
39.65 +#define MEM_READ_WORD( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_word)((addr), &&except)
39.66 +#define MEM_READ_LONG( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_long)((addr), &&except)
39.67 +#define MEM_WRITE_BYTE( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_byte)((addr), (val), &&except)
39.68 +#define MEM_WRITE_WORD( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_word)((addr), (val), &&except)
39.69 +#define MEM_WRITE_LONG( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_long)((addr), (val), &&except)
39.70 +#define MEM_PREFETCH( addr ) ((mem_prefetch_exc_fn_t)ADDRSPACE[(addr)>>12]->prefetch)((addr), &&except)
39.71 #else
39.72 #define INIT_EXCEPTIONS(label)
39.73 -#define MMU_TRANSLATE_READ( addr ) if( (memtmp = mmu_vma_to_phys_read(addr)) == MMU_VMA_ERROR ) { return TRUE; }
39.74 -#define MMU_TRANSLATE_WRITE( addr ) if( (memtmp = mmu_vma_to_phys_write(addr)) == MMU_VMA_ERROR ) { return TRUE; }
39.75 +#define MEM_READ_BYTE( addr, val ) val = ADDRSPACE[(addr)>>12]->read_byte(addr)
39.76 +#define MEM_READ_WORD( addr, val ) val = ADDRSPACE[(addr)>>12]->read_word(addr)
39.77 +#define MEM_READ_LONG( addr, val ) val = ADDRSPACE[(addr)>>12]->read_long(addr)
39.78 +#define MEM_WRITE_BYTE( addr, val ) ADDRSPACE[(addr)>>12]->write_byte(addr, val)
39.79 +#define MEM_WRITE_WORD( addr, val ) ADDRSPACE[(addr)>>12]->write_word(addr, val)
39.80 +#define MEM_WRITE_LONG( addr, val ) ADDRSPACE[(addr)>>12]->write_long(addr, val)
39.81 +#define MEM_PREFETCH( addr ) ADDRSPACE[(addr)>>12]->prefetch(addr)
39.82 #endif
39.83 -
39.84 -#define MEM_READ_BYTE( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_byte(memtmp)
39.85 -#define MEM_READ_WORD( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_word(memtmp)
39.86 -#define MEM_READ_LONG( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_long(memtmp)
39.87 -#define MEM_WRITE_BYTE( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_byte(memtmp, val)
39.88 -#define MEM_WRITE_WORD( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_word(memtmp, val)
39.89 -#define MEM_WRITE_LONG( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_long(memtmp, val)
39.90 -
39.91
39.92 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
39.93
39.94 #define MEM_FP_READ( addr, reg ) \
39.95 if( IS_FPU_DOUBLESIZE() ) { \
39.96 CHECKRALIGN64(addr); \
39.97 - MMU_TRANSLATE_READ(addr); \
39.98 if( reg & 1 ) { \
39.99 - *((uint32_t *)&XF((reg) & 0x0E)) = sh4_read_long(memtmp); \
39.100 - *((uint32_t *)&XF(reg)) = sh4_read_long(memtmp+4); \
39.101 + MEM_READ_LONG( addr, *((uint32_t *)&XF((reg) & 0x0E)) ); \
39.102 + MEM_READ_LONG( addr+4, *((uint32_t *)&XF(reg)) ); \
39.103 } else { \
39.104 - *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
39.105 - *((uint32_t *)&FR((reg) | 0x01)) = sh4_read_long(memtmp+4); \
39.106 + MEM_READ_LONG( addr, *((uint32_t *)&FR(reg)) ); \
39.107 + MEM_READ_LONG( addr+4, *((uint32_t *)&FR((reg)|0x01)) ); \
39.108 } \
39.109 } else { \
39.110 CHECKRALIGN32(addr); \
39.111 - MMU_TRANSLATE_READ(addr); \
39.112 - *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
39.113 + MEM_READ_LONG( addr, *((uint32_t *)&FR(reg)) ); \
39.114 }
39.115 #define MEM_FP_WRITE( addr, reg ) \
39.116 if( IS_FPU_DOUBLESIZE() ) { \
39.117 CHECKWALIGN64(addr); \
39.118 - MMU_TRANSLATE_WRITE(addr); \
39.119 if( reg & 1 ) { \
39.120 - sh4_write_long( memtmp, *((uint32_t *)&XF((reg)&0x0E)) ); \
39.121 - sh4_write_long( memtmp+4, *((uint32_t *)&XF(reg)) ); \
39.122 + MEM_WRITE_LONG( addr, *((uint32_t *)&XF((reg)&0x0E)) ); \
39.123 + MEM_WRITE_LONG( addr+4, *((uint32_t *)&XF(reg)) ); \
39.124 } else { \
39.125 - sh4_write_long( memtmp, *((uint32_t *)&FR(reg)) ); \
39.126 - sh4_write_long( memtmp+4, *((uint32_t *)&FR((reg)|0x01)) ); \
39.127 + MEM_WRITE_LONG( addr, *((uint32_t *)&FR(reg)) ); \
39.128 + MEM_WRITE_LONG( addr+4, *((uint32_t *)&FR((reg)|0x01)) ); \
39.129 } \
39.130 } else { \
39.131 CHECKWALIGN32(addr); \
39.132 - MMU_TRANSLATE_WRITE(addr); \
39.133 - sh4_write_long( memtmp, *((uint32_t *)&FR((reg))) ); \
39.134 + MEM_WRITE_LONG(addr, *((uint32_t *)&FR((reg))) ); \
39.135 }
39.136
39.137 +#define UNDEF(ir)
39.138 +#define UNIMP(ir)
39.139 +
39.140 +/**
39.141 + * Perform instruction-completion following core exit of a partially completed
39.142 + * instruction. NOTE: This is only allowed on memory writes, operation is not
39.143 + * guaranteed in any other case.
39.144 + */
39.145 +void sh4_finalize_instruction( void )
39.146 +{
39.147 + unsigned short ir;
39.148 + uint32_t tmp;
39.149 +
39.150 + assert( IS_IN_ICACHE(sh4r.pc) );
39.151 + ir = *(uint16_t *)GET_ICACHE_PTR(sh4r.pc);
39.152 +
39.153 + /**
39.154 + * Note - we can't take an exit on a control transfer instruction itself,
39.155 + * which means the exit must have happened in the delay slot. So for these
39.156 + * cases, finalize the delay slot instruction, and re-execute the control transfer.
39.157 + *
39.158 + * For delay slots which modify the argument used in the branch instruction,
39.159 + * we pretty much just assume that that can't have already happened in an exit case.
39.160 + */
39.161 +
39.162 +%%
39.163 +BRA disp {:
39.164 + sh4r.pc += 2;
39.165 + sh4_finalize_instruction();
39.166 + sh4r.pc += disp;
39.167 + sh4r.slice_cycle += sh4_cpu_period;
39.168 +:}
39.169 +BRAF Rn {:
39.170 + sh4r.pc += 2;
39.171 + tmp = sh4r.r[Rn];
39.172 + sh4_finalize_instruction();
39.173 + sh4r.pc += tmp;
39.174 + sh4r.slice_cycle += sh4_cpu_period;
39.175 +:}
39.176 +BSR disp {:
39.177 + /* Note: PR is already set */
39.178 + sh4r.pc += 2;
39.179 + sh4_finalize_instruction();
39.180 + sh4r.pc += disp;
39.181 + sh4r.slice_cycle += sh4_cpu_period;
39.182 +:}
39.183 +BSRF Rn {:
39.184 + /* Note: PR is already set */
39.185 + sh4r.pc += 2;
39.186 + tmp = sh4r.r[Rn];
39.187 + sh4_finalize_instruction();
39.188 + sh4r.pc += tmp;
39.189 + sh4r.slice_cycle += sh4_cpu_period;
39.190 +:}
39.191 +BF/S disp {:
39.192 + sh4r.pc += 2;
39.193 + sh4_finalize_instruction();
39.194 + if( !sh4r.t ) {
39.195 + sh4r.pc += disp;
39.196 + }
39.197 + sh4r.slice_cycle += sh4_cpu_period;
39.198 +:}
39.199 +BT/S disp {:
39.200 + sh4r.pc += 2;
39.201 + sh4_finalize_instruction();
39.202 + if( sh4r.t ) {
39.203 + sh4r.pc += disp;
39.204 + }
39.205 + sh4r.slice_cycle += sh4_cpu_period;
39.206 +:}
39.207 +JMP @Rn {:
39.208 + sh4r.pc += 2;
39.209 + tmp = sh4r.r[Rn];
39.210 + sh4_finalize_instruction();
39.211 + sh4r.pc = tmp;
39.212 + sh4r.new_pc = tmp + 2;
39.213 + sh4r.slice_cycle += 2*sh4_cpu_period;
39.214 + return;
39.215 +:}
39.216 +JSR @Rn {:
39.217 + /* Note: PR is already set */
39.218 + sh4r.pc += 2;
39.219 + tmp = sh4r.r[Rn];
39.220 + sh4_finalize_instruction();
39.221 + sh4r.pc = tmp;
39.222 + sh4r.new_pc = tmp + 2;
39.223 + sh4r.slice_cycle += 2*sh4_cpu_period;
39.224 + return;
39.225 +:}
39.226 +RTS {:
39.227 + sh4r.pc += 2;
39.228 + sh4_finalize_instruction();
39.229 + sh4r.pc = sh4r.pr;
39.230 + sh4r.new_pc = sh4r.pr + 2;
39.231 + sh4r.slice_cycle += 2*sh4_cpu_period;
39.232 + return;
39.233 +:}
39.234 +RTE {:
39.235 + /* SR is already set */
39.236 + sh4r.pc += 2;
39.237 + sh4_finalize_instruction();
39.238 + sh4r.pc = sh4r.spc;
39.239 + sh4r.new_pc = sh4r.pr + 2;
39.240 + sh4r.slice_cycle += 2*sh4_cpu_period;
39.241 + return;
39.242 +:}
39.243 +MOV.B Rm, @-Rn {: sh4r.r[Rn]--; :}
39.244 +MOV.W Rm, @-Rn {: sh4r.r[Rn] -= 2; :}
39.245 +MOV.L Rm, @-Rn {: sh4r.r[Rn] -= 4; :}
39.246 +MOV.B @Rm+, Rn {: sh4r.r[Rm] ++; :}
39.247 +MOV.W @Rm+, Rn {: sh4r.r[Rm] += 2; :}
39.248 +MOV.L @Rm+, Rn {: sh4r.r[Rm] += 4; :}
39.249 +%%
39.250 + sh4r.pc += 2;
39.251 + sh4r.new_pc = sh4r.pc+2;
39.252 + sh4r.slice_cycle += sh4_cpu_period;
39.253 +}
39.254 +
39.255 +#undef UNDEF(ir)
39.256 +#undef UNIMP(ir)
39.257 +
39.258 +#define UNDEF(ir) return sh4_raise_slot_exception(EXC_ILLEGAL, EXC_SLOT_ILLEGAL)
39.259 +#define UNIMP(ir) do{ ERROR( "Halted on unimplemented instruction at %08x, opcode = %04x", sh4r.pc, ir ); sh4_core_exit(CORE_EXIT_HALT); return FALSE; }while(0)
39.260 +
39.261 +
39.262 gboolean sh4_execute_instruction( void )
39.263 {
39.264 uint32_t pc;
39.265 @@ -260,6 +393,14 @@
39.266 }
39.267 assert( IS_IN_ICACHE(pc) );
39.268 ir = *(uint16_t *)GET_ICACHE_PTR(sh4r.pc);
39.269 +
39.270 + /* FIXME: This is a bit of a hack, but the PC of the delay slot should not
39.271 + * be visible until after the instruction has executed (for exception
39.272 + * correctness)
39.273 + */
39.274 + if( sh4r.in_delay_slot ) {
39.275 + sh4r.pc -= 2;
39.276 + }
39.277 %%
39.278 AND Rm, Rn {: sh4r.r[Rn] &= sh4r.r[Rm]; :}
39.279 AND #imm, R0 {: R0 &= imm; :}
39.280 @@ -351,10 +492,7 @@
39.281 NOP {: /* NOP */ :}
39.282
39.283 PREF @Rn {:
39.284 - tmp = sh4r.r[Rn];
39.285 - if( (tmp & 0xFC000000) == 0xE0000000 ) {
39.286 - sh4_flush_store_queue(tmp);
39.287 - }
39.288 + MEM_PREFETCH(sh4r.r[Rn]);
39.289 :}
39.290 OCBI @Rn {: :}
39.291 OCBP @Rn {: :}
40.1 --- a/src/sh4/sh4dasm.in Mon Dec 15 10:44:56 2008 +0000
40.2 +++ b/src/sh4/sh4dasm.in Tue Jan 13 11:56:28 2009 +0000
40.3 @@ -51,7 +51,8 @@
40.4 uint32_t sh4_disasm_instruction( sh4vma_t pc, char *buf, int len, char *opcode )
40.5 {
40.6 sh4addr_t addr = mmu_vma_to_phys_disasm(pc);
40.7 - uint16_t ir = sh4_read_word(addr);
40.8 + uint32_t tmp;
40.9 + uint16_t ir = ext_address_space[addr>>12]->read_word(addr);
40.10
40.11 #define UNDEF(ir) snprintf( buf, len, "???? " );
40.12 #define RN(ir) ((ir&0x0F00)>>8)
40.13 @@ -189,7 +190,10 @@
40.14 MOV.L @Rm+, Rn {: snprintf( buf, len, "MOV.L @R%d+, R%d", Rm, Rn ); :}
40.15 MOV.L @(R0, Rm), Rn {: snprintf( buf, len, "MOV.L @(R0, R%d), R%d", Rm, Rn ); :}
40.16 MOV.L @(disp, GBR), R0 {: snprintf( buf, len, "MOV.L @(%d, GBR), R0",disp ); :}
40.17 -MOV.L @(disp, PC), Rn {: snprintf( buf, len, "MOV.L @($%xh), R%d ; <- #%08x", disp + (pc & 0xFFFFFFFC) + 4, Rn, sh4_read_long(disp+(addr&0xFFFFFFFC)+4) ); :}
40.18 +MOV.L @(disp, PC), Rn {:
40.19 + tmp = mmu_vma_to_phys_disasm(disp + (pc&0xFFFFFFFC) + 4);
40.20 + snprintf( buf, len, "MOV.L @($%xh), R%d ; <- #%08x", disp + (pc&0xFFFFFFFC)+4, Rn, ext_address_space[tmp>>12]->read_long(tmp) );
40.21 +:}
40.22 MOV.L @(disp, Rm), Rn {: snprintf( buf, len, "MOV.L @(%d, R%d), R%d", disp, Rm, Rn ); :}
40.23 MOV.W Rm, @Rn {: snprintf( buf, len, "MOV.W R%d, @R%d", Rm, Rn ); :}
40.24 MOV.W Rm, @-Rn {: snprintf( buf, len, "MOV.W R%d, @-R%d", Rm, Rn ); :}
40.25 @@ -200,7 +204,10 @@
40.26 MOV.W @Rm+, Rn {: snprintf( buf, len, "MOV.W @R%d+, R%d", Rm, Rn ); :}
40.27 MOV.W @(R0, Rm), Rn {: snprintf( buf, len, "MOV.W @(R0, R%d), R%d", Rm, Rn ); :}
40.28 MOV.W @(disp, GBR), R0 {: snprintf( buf, len, "MOV.W @(%d, GBR), R0", disp ); :}
40.29 -MOV.W @(disp, PC), Rn {: snprintf( buf, len, "MOV.W @($%xh), R%d ; <- #%08x", disp + pc + 4, Rn, sh4_read_word(disp+addr+4) ); :}
40.30 +MOV.W @(disp, PC), Rn {:
40.31 + tmp = mmu_vma_to_phys_disasm(disp+pc+4);
40.32 + snprintf( buf, len, "MOV.W @($%xh), R%d ; <- #%08x", disp+pc+4, Rn, ext_address_space[tmp>>12]->read_word(tmp) );
40.33 +:}
40.34 MOV.W @(disp, Rm), R0 {: snprintf( buf, len, "MOV.W @(%d, R%d), R0", disp, Rm ); :}
40.35 MOVA @(disp, PC), R0 {: snprintf( buf, len, "MOVA @($%xh), R0", disp + (pc&0xFFFFFFFC) + 4 ); :}
40.36 MOVCA.L R0, @Rn {: snprintf( buf, len, "MOVCA.L R0, @R%d", Rn ); :}
41.1 --- a/src/sh4/sh4mem.c Mon Dec 15 10:44:56 2008 +0000
41.2 +++ b/src/sh4/sh4mem.c Tue Jan 13 11:56:28 2009 +0000
41.3 @@ -1,7 +1,8 @@
41.4 /**
41.5 * $Id$
41.6 - * sh4mem.c is responsible for the SH4's access to memory (including memory
41.7 - * mapped I/O), using the page maps created in mem.c
41.8 + *
41.9 + * This is a deprecated module that is not yet completely extricated from the
41.10 + * surrounding code.
41.11 *
41.12 * Copyright (c) 2005 Nathan Keynes.
41.13 *
41.14 @@ -27,375 +28,10 @@
41.15 #include "sh4/sh4core.h"
41.16 #include "sh4/sh4mmio.h"
41.17 #include "sh4/xltcache.h"
41.18 +#include "sh4/mmu.h"
41.19 #include "pvr2/pvr2.h"
41.20 -#include "asic.h"
41.21
41.22 -#define OC_BASE 0x1C000000
41.23 -#define OC_TOP 0x20000000
41.24 -
41.25 -#define TRANSLATE_VIDEO_64BIT_ADDRESS(a) ( (((a)&0x00FFFFF8)>>1)|(((a)&0x00000004)<<20)|((a)&0x03)|0x05000000 )
41.26 -
41.27 -#ifdef ENABLE_WATCH
41.28 -#define CHECK_READ_WATCH( addr, size ) \
41.29 - if( mem_is_watched(addr,size,WATCH_READ) != NULL ) { \
41.30 - WARN( "Watch triggered at %08X by %d byte read", addr, size ); \
41.31 - sh4_core_exit(CORE_EXIT_HALT); \
41.32 - }
41.33 -#define CHECK_WRITE_WATCH( addr, size, val ) \
41.34 - if( mem_is_watched(addr,size,WATCH_WRITE) != NULL ) { \
41.35 - WARN( "Watch triggered at %08X by %d byte write <= %0*X", addr, size, size*2, val ); \
41.36 - sh4_core_exit(CORE_EXIT_HALT); \
41.37 - }
41.38 -#else
41.39 -#define CHECK_READ_WATCH( addr, size )
41.40 -#define CHECK_WRITE_WATCH( addr, size, val )
41.41 -#endif
41.42 -
41.43 -#ifdef ENABLE_TRACE_IO
41.44 -#define TRACE_IO( str, p, r, ... ) if(io_rgn[(uint32_t)p]->trace_flag && !MMIO_NOTRACE_BYNUM((uint32_t)p,r)) \
41.45 - TRACE( str " [%s.%s: %s]", __VA_ARGS__, \
41.46 - MMIO_NAME_BYNUM((uint32_t)p), MMIO_REGID_BYNUM((uint32_t)p, r), \
41.47 - MMIO_REGDESC_BYNUM((uint32_t)p, r) )
41.48 -#define TRACE_P4IO( str, io, r, ... ) if(io->trace_flag && !MMIO_NOTRACE_IOBYNUM(io,r)) \
41.49 - TRACE( str " [%s.%s: %s]", __VA_ARGS__, \
41.50 - io->id, MMIO_REGID_IOBYNUM(io, r), \
41.51 - MMIO_REGDESC_IOBYNUM(io, r) )
41.52 -#else
41.53 -#define TRACE_IO( str, p, r, ... )
41.54 -#define TRACE_P4IO( str, io, r, ... )
41.55 -#endif
41.56 -
41.57 -extern struct mem_region mem_rgn[];
41.58 -extern struct mmio_region *P4_io[];
41.59 -
41.60 -int32_t FASTCALL sh4_read_p4( sh4addr_t addr )
41.61 -{
41.62 - struct mmio_region *io = P4_io[(addr&0x1FFFFFFF)>>19];
41.63 - if( !io ) {
41.64 - switch( addr & 0x1F000000 ) {
41.65 - case 0x00000000: case 0x01000000: case 0x02000000: case 0x03000000:
41.66 - /* Store queue - readable? */
41.67 - return 0;
41.68 - break;
41.69 - case 0x10000000: return mmu_icache_addr_read( addr );
41.70 - case 0x11000000: return mmu_icache_data_read( addr );
41.71 - case 0x12000000: return mmu_itlb_addr_read( addr );
41.72 - case 0x13000000: return mmu_itlb_data_read( addr );
41.73 - case 0x14000000: return mmu_ocache_addr_read( addr );
41.74 - case 0x15000000: return mmu_ocache_data_read( addr );
41.75 - case 0x16000000: return mmu_utlb_addr_read( addr );
41.76 - case 0x17000000: return mmu_utlb_data_read( addr );
41.77 - default:
41.78 - WARN( "Attempted read from unknown or invalid P4 region: %08X", addr );
41.79 - return 0;
41.80 - }
41.81 - } else {
41.82 - int32_t val = io->io_read( addr&0xFFF );
41.83 - TRACE_P4IO( "Long read %08X <= %08X", io, (addr&0xFFF), val, addr );
41.84 - return val;
41.85 - }
41.86 -}
41.87 -
41.88 -void FASTCALL sh4_write_p4( sh4addr_t addr, int32_t val )
41.89 -{
41.90 - struct mmio_region *io = P4_io[(addr&0x1FFFFFFF)>>19];
41.91 - if( !io ) {
41.92 - switch( addr & 0x1F000000 ) {
41.93 - case 0x00000000: case 0x01000000: case 0x02000000: case 0x03000000:
41.94 - /* Store queue */
41.95 - SH4_WRITE_STORE_QUEUE( addr, val );
41.96 - break;
41.97 - case 0x10000000: mmu_icache_addr_write( addr, val ); break;
41.98 - case 0x11000000: mmu_icache_data_write( addr, val ); break;
41.99 - case 0x12000000: mmu_itlb_addr_write( addr, val ); break;
41.100 - case 0x13000000: mmu_itlb_data_write( addr, val ); break;
41.101 - case 0x14000000: mmu_ocache_addr_write( addr, val ); break;
41.102 - case 0x15000000: mmu_ocache_data_write( addr, val ); break;
41.103 - case 0x16000000: mmu_utlb_addr_write( addr, val ); break;
41.104 - case 0x17000000: mmu_utlb_data_write( addr, val ); break;
41.105 - default:
41.106 - if( (addr & 0xFFFF0000 ) == 0xFF940000 ||
41.107 - (addr & 0xFFFF0000 ) == 0xFF900000 ) {
41.108 - // SDRAM configuration, ignore for now
41.109 - } else {
41.110 - WARN( "Attempted write to unknown P4 region: %08X", addr );
41.111 - }
41.112 - }
41.113 - } else {
41.114 - TRACE_P4IO( "Long write %08X => %08X", io, (addr&0xFFF), val, addr );
41.115 - io->io_write( addr&0xFFF, val );
41.116 - }
41.117 -}
41.118 -
41.119 -int32_t sh4_read_phys_word( sh4addr_t addr )
41.120 -{
41.121 - sh4ptr_t page;
41.122 - if( addr >= 0xE0000000 ) /* P4 Area, handled specially */
41.123 - return SIGNEXT16(sh4_read_p4( addr ));
41.124 -
41.125 - if( (addr&0x1F800000) == 0x04000000 ) {
41.126 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.127 - }
41.128 -
41.129 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.130 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.131 - if( page == NULL ) {
41.132 - WARN( "Attempted word read to missing page: %08X",
41.133 - addr );
41.134 - return 0;
41.135 - }
41.136 - return SIGNEXT16(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
41.137 - } else {
41.138 - return SIGNEXT16(*(int16_t *)(page+(addr&0xFFF)));
41.139 - }
41.140 -}
41.141 -
41.142 -/**
41.143 - * Convenience function to read a quad-word (implemented as two long reads).
41.144 - */
41.145 -int64_t FASTCALL sh4_read_quad( sh4addr_t addr )
41.146 -{
41.147 - return ((int64_t)((uint32_t)sh4_read_long(addr))) |
41.148 - (((int64_t)((uint32_t)sh4_read_long(addr+4))) << 32);
41.149 -}
41.150 -
41.151 -int32_t FASTCALL sh4_read_long( sh4addr_t addr )
41.152 -{
41.153 - sh4ptr_t page;
41.154 -
41.155 - CHECK_READ_WATCH(addr,4);
41.156 -
41.157 - if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
41.158 - return ZEROEXT32(sh4_read_p4( addr ));
41.159 - } else if( (addr&0x1C000000) == 0x0C000000 ) {
41.160 - return ZEROEXT32(*(int32_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
41.161 - } else if( (addr&0x1F800000) == 0x04000000 ) {
41.162 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.163 - pvr2_render_buffer_invalidate(addr, FALSE);
41.164 - } else if( (addr&0x1F800000) == 0x05000000 ) {
41.165 - pvr2_render_buffer_invalidate(addr, FALSE);
41.166 - }
41.167 -
41.168 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.169 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.170 - int32_t val;
41.171 - if( page == NULL ) {
41.172 - WARN( "Attempted long read to missing page: %08X", addr );
41.173 - return 0;
41.174 - }
41.175 - val = io_rgn[(uintptr_t)page]->io_read(addr&0xFFF);
41.176 - TRACE_IO( "Long read %08X <= %08X", page, (addr&0xFFF), val, addr );
41.177 - return ZEROEXT32(val);
41.178 - } else {
41.179 - return ZEROEXT32(*(int32_t *)(page+(addr&0xFFF)));
41.180 - }
41.181 -}
41.182 -
41.183 -int32_t FASTCALL sh4_read_word( sh4addr_t addr )
41.184 -{
41.185 - sh4ptr_t page;
41.186 -
41.187 - CHECK_READ_WATCH(addr,2);
41.188 -
41.189 - if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
41.190 - return ZEROEXT32(SIGNEXT16(sh4_read_p4( addr )));
41.191 - } else if( (addr&0x1C000000) == 0x0C000000 ) {
41.192 - return ZEROEXT32(SIGNEXT16(*(int16_t *)(sh4_main_ram + (addr&0x00FFFFFF))));
41.193 - } else if( (addr&0x1F800000) == 0x04000000 ) {
41.194 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.195 - pvr2_render_buffer_invalidate(addr, FALSE);
41.196 - } else if( (addr&0x1F800000) == 0x05000000 ) {
41.197 - pvr2_render_buffer_invalidate(addr, FALSE);
41.198 - }
41.199 -
41.200 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.201 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.202 - int32_t val;
41.203 - if( page == NULL ) {
41.204 - WARN( "Attempted word read to missing page: %08X", addr );
41.205 - return 0;
41.206 - }
41.207 - val = SIGNEXT16(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
41.208 - TRACE_IO( "Word read %04X <= %08X", page, (addr&0xFFF), val&0xFFFF, addr );
41.209 - return ZEROEXT32(val);
41.210 - } else {
41.211 - return ZEROEXT32(SIGNEXT16(*(int16_t *)(page+(addr&0xFFF))));
41.212 - }
41.213 -}
41.214 -
41.215 -int32_t FASTCALL sh4_read_byte( sh4addr_t addr )
41.216 -{
41.217 - sh4ptr_t page;
41.218 -
41.219 - CHECK_READ_WATCH(addr,1);
41.220 -
41.221 - if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
41.222 - return ZEROEXT32(SIGNEXT8(sh4_read_p4( addr )));
41.223 - } else if( (addr&0x1C000000) == 0x0C000000 ) {
41.224 - return ZEROEXT32(SIGNEXT8(*(int8_t *)(sh4_main_ram + (addr&0x00FFFFFF))));
41.225 - } else if( (addr&0x1F800000) == 0x04000000 ) {
41.226 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.227 - pvr2_render_buffer_invalidate(addr, FALSE);
41.228 - } else if( (addr&0x1F800000) == 0x05000000 ) {
41.229 - pvr2_render_buffer_invalidate(addr, FALSE);
41.230 - }
41.231 -
41.232 -
41.233 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.234 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.235 - int32_t val;
41.236 - if( page == NULL ) {
41.237 - WARN( "Attempted byte read to missing page: %08X", addr );
41.238 - return 0;
41.239 - }
41.240 - val = SIGNEXT8(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
41.241 - TRACE_IO( "Byte read %02X <= %08X", page, (addr&0xFFF), val&0xFF, addr );
41.242 - return ZEROEXT32(val);
41.243 - } else {
41.244 - return ZEROEXT32(SIGNEXT8(*(int8_t *)(page+(addr&0xFFF))));
41.245 - }
41.246 -}
41.247 -
41.248 -/**
41.249 - * Convenience function to write a quad-word (implemented as two long writes).
41.250 - */
41.251 -void FASTCALL sh4_write_quad( sh4addr_t addr, uint64_t val )
41.252 -{
41.253 - sh4_write_long( addr, (uint32_t)val );
41.254 - sh4_write_long( addr+4, (uint32_t)(val>>32) );
41.255 -}
41.256 -
41.257 -void FASTCALL sh4_write_long( sh4addr_t addr, uint32_t val )
41.258 -{
41.259 - sh4ptr_t page;
41.260 -
41.261 - CHECK_WRITE_WATCH(addr,4,val);
41.262 -
41.263 - if( addr >= 0xE0000000 ) {
41.264 - if( addr < 0xE4000000 ) { // Shortcut for the extremely common case
41.265 - SH4_WRITE_STORE_QUEUE( addr, val );
41.266 - } else {
41.267 - sh4_write_p4( addr, val );
41.268 - }
41.269 - return;
41.270 - } else if( (addr&0x1C000000) == 0x0C000000 ) {
41.271 - *(uint32_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
41.272 - xlat_invalidate_long(addr);
41.273 - return;
41.274 - } else if( (addr&0x1F800000) == 0x04000000 ||
41.275 - (addr&0x1F800000) == 0x11000000 ) {
41.276 - texcache_invalidate_page(addr& 0x7FFFFF);
41.277 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.278 - pvr2_render_buffer_invalidate(addr, TRUE);
41.279 - } else if( (addr&0x1F800000) == 0x05000000 ) {
41.280 - pvr2_render_buffer_invalidate(addr, TRUE);
41.281 - }
41.282 -
41.283 - if( (addr&0x1FFFFFFF) < 0x200000 ) {
41.284 - WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
41.285 - sh4_stop();
41.286 - return;
41.287 - }
41.288 - if( (addr&0x1F800000) == 0x00800000 )
41.289 - asic_g2_write_word();
41.290 -
41.291 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.292 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.293 - if( page == NULL ) {
41.294 - if( (addr & 0x1F000000) >= 0x04000000 &&
41.295 - (addr & 0x1F000000) < 0x07000000 )
41.296 - return;
41.297 - WARN( "Long write to missing page: %08X => %08X", val, addr );
41.298 - return;
41.299 - }
41.300 - TRACE_IO( "Long write %08X => %08X", page, (addr&0xFFF), val, addr );
41.301 - io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
41.302 - } else {
41.303 - *(uint32_t *)(page+(addr&0xFFF)) = val;
41.304 - }
41.305 -}
41.306 -
41.307 -void FASTCALL sh4_write_word( sh4addr_t addr, uint32_t val )
41.308 -{
41.309 - sh4ptr_t page;
41.310 -
41.311 - CHECK_WRITE_WATCH(addr,2,val);
41.312 -
41.313 - if( addr >= 0xE0000000 ) {
41.314 - sh4_write_p4( addr, (int16_t)val );
41.315 - return;
41.316 - } else if( (addr&0x1C000000) == 0x0C000000 ) {
41.317 - *(uint16_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
41.318 - xlat_invalidate_word(addr);
41.319 - return;
41.320 - } else if( (addr&0x1F800000) == 0x04000000 ||
41.321 - (addr&0x1F800000) == 0x11000000 ) {
41.322 - texcache_invalidate_page(addr& 0x7FFFFF);
41.323 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.324 - pvr2_render_buffer_invalidate(addr, TRUE);
41.325 - } else if( (addr&0x1F800000) == 0x05000000 ) {
41.326 - pvr2_render_buffer_invalidate(addr, TRUE);
41.327 - }
41.328 -
41.329 - if( (addr&0x1FFFFFFF) < 0x200000 ) {
41.330 - WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
41.331 - sh4_stop();
41.332 - return;
41.333 - }
41.334 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.335 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.336 - if( page == NULL ) {
41.337 - WARN( "Attempted word write to missing page: %08X", addr );
41.338 - return;
41.339 - }
41.340 - TRACE_IO( "Word write %04X => %08X", page, (addr&0xFFF), val&0xFFFF, addr );
41.341 - io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
41.342 - } else {
41.343 - *(uint16_t *)(page+(addr&0xFFF)) = val;
41.344 - }
41.345 -}
41.346 -
41.347 -void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val )
41.348 -{
41.349 - sh4ptr_t page;
41.350 -
41.351 - CHECK_WRITE_WATCH(addr,1,val);
41.352 -
41.353 - if( addr >= 0xE0000000 ) {
41.354 - sh4_write_p4( addr, (int8_t)val );
41.355 - return;
41.356 - } else if( (addr&0x1C000000) == 0x0C000000 ) {
41.357 - *(uint8_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
41.358 - xlat_invalidate_word(addr);
41.359 - return;
41.360 - } else if( (addr&0x1F800000) == 0x04000000 ||
41.361 - (addr&0x1F800000) == 0x11000000 ) {
41.362 - texcache_invalidate_page(addr& 0x7FFFFF);
41.363 - addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
41.364 - pvr2_render_buffer_invalidate(addr, TRUE);
41.365 - } else if( (addr&0x1F800000) == 0x05000000 ) {
41.366 - pvr2_render_buffer_invalidate(addr, TRUE);
41.367 - }
41.368 -
41.369 - if( (addr&0x1FFFFFFF) < 0x200000 ) {
41.370 - WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
41.371 - sh4_stop();
41.372 - return;
41.373 - }
41.374 - page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
41.375 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
41.376 - if( page == NULL ) {
41.377 - WARN( "Attempted byte write to missing page: %08X", addr );
41.378 - return;
41.379 - }
41.380 - TRACE_IO( "Byte write %02X => %08X", page, (addr&0xFFF), val&0xFF, addr );
41.381 - io_rgn[(uintptr_t)page]->io_write( (addr&0xFFF), val);
41.382 - } else {
41.383 - *(uint8_t *)(page+(addr&0xFFF)) = val;
41.384 - }
41.385 -}
41.386 -
41.387 -
41.388 +/************** Obsolete methods ***************/
41.389
41.390 /* FIXME: Handle all the many special cases when the range doesn't fall cleanly
41.391 * into the same memory block
41.392 @@ -431,4 +67,3 @@
41.393 memcpy( dest, src, count );
41.394 }
41.395 }
41.396 -
42.1 --- a/src/sh4/sh4mmio.c Mon Dec 15 10:44:56 2008 +0000
42.2 +++ b/src/sh4/sh4mmio.c Tue Jan 13 11:56:28 2009 +0000
42.3 @@ -72,9 +72,10 @@
42.4
42.5 MMIO_REGION_WRITE_DEFFN(BSC)
42.6
42.7 -int32_t mmio_region_BSC_read( uint32_t reg )
42.8 +MMIO_REGION_READ_FN( BSC, reg )
42.9 {
42.10 int32_t val;
42.11 + reg &= 0xFFF;
42.12 switch( reg ) {
42.13 case PDTRA:
42.14 val = bsc_read_pdtra();
42.15 @@ -90,13 +91,14 @@
42.16
42.17 /********************************* UBC *************************************/
42.18
42.19 -int32_t mmio_region_UBC_read( uint32_t reg )
42.20 +MMIO_REGION_READ_FN( UBC, reg )
42.21 {
42.22 - return MMIO_READ( UBC, reg );
42.23 + return MMIO_READ( UBC, reg & 0xFFF );
42.24 }
42.25
42.26 -void mmio_region_UBC_write( uint32_t reg, uint32_t val )
42.27 +MMIO_REGION_WRITE_FN( UBC, reg, val )
42.28 {
42.29 + reg &= 0xFFF;
42.30 switch( reg ) {
42.31 case BAMRA:
42.32 case BAMRB:
43.1 --- a/src/sh4/sh4mmio.h Mon Dec 15 10:44:56 2008 +0000
43.2 +++ b/src/sh4/sh4mmio.h Tue Jan 13 11:56:28 2009 +0000
43.3 @@ -203,19 +203,6 @@
43.4 MMIO_REGION( PMM )
43.5 MMIO_REGION_LIST_END
43.6
43.7 -/* mmucr register bits */
43.8 -#define MMUCR_AT 0x00000001 /* Address Translation enabled */
43.9 -#define MMUCR_TI 0x00000004 /* TLB invalidate (always read as 0) */
43.10 -#define MMUCR_SV 0x00000100 /* Single Virtual mode=1 / multiple virtual=0 */
43.11 -#define MMUCR_SQMD 0x00000200 /* Store queue mode bit (0=user, 1=priv only) */
43.12 -#define MMUCR_URC 0x0000FC00 /* UTLB access counter */
43.13 -#define MMUCR_URB 0x00FC0000 /* UTLB entry boundary */
43.14 -#define MMUCR_LRUI 0xFC000000 /* Least recently used ITLB */
43.15 -#define MMUCR_MASK 0xFCFCFF05
43.16 -#define MMUCR_RMASK 0xFCFCFF01 /* Read mask */
43.17 -
43.18 -#define IS_MMU_ENABLED() (MMIO_READ(MMU, MMUCR)&MMUCR_AT)
43.19 -
43.20 /* ccr register bits */
43.21 #define CCR_IIX 0x00008000 /* IC index enable */
43.22 #define CCR_ICI 0x00000800 /* IC invalidation (always read as 0) */
43.23 @@ -243,24 +230,6 @@
43.24 void mmu_set_cache_mode( int );
43.25 void mmu_ldtlb(void);
43.26
43.27 -int32_t mmu_icache_addr_read( sh4addr_t addr );
43.28 -int32_t mmu_icache_data_read( sh4addr_t addr );
43.29 -int32_t mmu_itlb_addr_read( sh4addr_t addr );
43.30 -int32_t mmu_itlb_data_read( sh4addr_t addr );
43.31 -int32_t mmu_ocache_addr_read( sh4addr_t addr );
43.32 -int32_t mmu_ocache_data_read( sh4addr_t addr );
43.33 -int32_t mmu_utlb_addr_read( sh4addr_t addr );
43.34 -int32_t mmu_utlb_data_read( sh4addr_t addr );
43.35 -void mmu_icache_addr_write( sh4addr_t addr, uint32_t val );
43.36 -void mmu_icache_data_write( sh4addr_t addr, uint32_t val );
43.37 -void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val );
43.38 -void mmu_itlb_data_write( sh4addr_t addr, uint32_t val );
43.39 -void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val );
43.40 -void mmu_ocache_data_write( sh4addr_t addr, uint32_t val );
43.41 -void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val );
43.42 -void mmu_utlb_data_write( sh4addr_t addr, uint32_t val );
43.43 -
43.44 -
43.45 #ifdef __cplusplus
43.46 }
43.47 #endif
44.1 --- a/src/sh4/sh4stat.in Mon Dec 15 10:44:56 2008 +0000
44.2 +++ b/src/sh4/sh4stat.in Tue Jan 13 11:56:28 2009 +0000
44.3 @@ -19,6 +19,7 @@
44.4 #include "dream.h"
44.5 #include "sh4/sh4stat.h"
44.6 #include "sh4/sh4core.h"
44.7 +#include "sh4/mmu.h"
44.8
44.9 static uint64_t sh4_stats[SH4_INSTRUCTION_COUNT+1];
44.10 static uint64_t sh4_stats_total;
44.11 @@ -194,7 +195,8 @@
44.12
44.13 void sh4_stats_add_by_pc( uint32_t pc )
44.14 {
44.15 - uint16_t ir = sh4_read_word(pc);
44.16 + sh4addr_t addr = mmu_vma_to_phys_disasm(pc);
44.17 + uint16_t ir = ext_address_space[addr>>12]->read_word(addr);
44.18 #define UNDEF(ir) sh4_stats[0]++
44.19 %%
44.20 ADD Rm, Rn {: sh4_stats[I_ADD]++; :}
45.1 --- a/src/sh4/sh4trans.c Mon Dec 15 10:44:56 2008 +0000
45.2 +++ b/src/sh4/sh4trans.c Tue Jan 13 11:56:28 2009 +0000
45.3 @@ -52,9 +52,11 @@
45.4 }
45.5
45.6 code = xlat_get_code_by_vma( sh4r.pc );
45.7 - if( code == NULL || (sh4r.fpscr & (FPSCR_PR|FPSCR_SZ)) != XLAT_BLOCK_FPSCR(code) ) {
45.8 + if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
45.9 code = sh4_translate_basic_block( sh4r.pc );
45.10 }
45.11 + } else if( sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
45.12 + code = sh4_translate_basic_block( sh4r.pc );
45.13 }
45.14 code = code();
45.15 }
45.16 @@ -139,8 +141,7 @@
45.17 memcpy( xlat_output, xlat_recovery, recovery_size);
45.18 xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
45.19 xlat_current_block->recover_table_size = xlat_recovery_posn;
45.20 - xlat_current_block->fpscr = sh4r.fpscr & (FPSCR_PR|FPSCR_SZ);
45.21 - xlat_current_block->fpscr_mask = (FPSCR_PR|FPSCR_SZ);
45.22 + xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
45.23 xlat_commit_block( finalsize, pc-start );
45.24 return xlat_current_block->code;
45.25 }
45.26 @@ -157,6 +158,16 @@
45.27 sh4r.pc += (recovery->sh4_icount<<1);
45.28 }
45.29
45.30 +/**
45.31 + * Same as sh4_translate_run_recovery, but is used to recover from a taken
45.32 + * exception - that is, it fixes sh4r.spc rather than sh4r.pc
45.33 + */
45.34 +void sh4_translate_run_exception_recovery( xlat_recovery_record_t recovery )
45.35 +{
45.36 + sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
45.37 + sh4r.spc += (recovery->sh4_icount<<1);
45.38 +}
45.39 +
45.40 void sh4_translate_exit_recover( )
45.41 {
45.42 void *code = xlat_get_code_by_vma( sh4r.pc );
45.43 @@ -165,7 +176,7 @@
45.44 void *pc = xlat_get_native_pc( code, size );
45.45 if( pc != NULL ) {
45.46 // could be null if we're not actually running inside the translator
45.47 - xlat_recovery_record_t recover = xlat_get_post_recovery(code, pc, TRUE);
45.48 + xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
45.49 if( recover != NULL ) {
45.50 // Can be null if there is no recovery necessary
45.51 sh4_translate_run_recovery(recover);
45.52 @@ -174,6 +185,24 @@
45.53 }
45.54 }
45.55
45.56 +void sh4_translate_exception_exit_recover( )
45.57 +{
45.58 + void *code = xlat_get_code_by_vma( sh4r.spc );
45.59 + if( code != NULL ) {
45.60 + uint32_t size = xlat_get_code_size( code );
45.61 + void *pc = xlat_get_native_pc( code, size );
45.62 + if( pc != NULL ) {
45.63 + // could be null if we're not actually running inside the translator
45.64 + xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
45.65 + if( recover != NULL ) {
45.66 + // Can be null if there is no recovery necessary
45.67 + sh4_translate_run_exception_recovery(recover);
45.68 + }
45.69 + }
45.70 + }
45.71 +
45.72 +}
45.73 +
45.74 void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
45.75 {
45.76 if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
45.77 @@ -182,38 +211,6 @@
45.78 sh4_core_exit( CORE_EXIT_BREAKPOINT );
45.79 }
45.80
45.81 -/**
45.82 - * Exit the current block at the end of the current instruction, flush the
45.83 - * translation cache (completely) and return control to sh4_xlat_run_slice.
45.84 - *
45.85 - * As a special case, if the current instruction is actually the last
45.86 - * instruction in the block (ie it's in a delay slot), this function
45.87 - * returns to allow normal completion of the translation block. Otherwise
45.88 - * this function never returns.
45.89 - *
45.90 - * Must only be invoked (indirectly) from within translated code.
45.91 - */
45.92 -gboolean sh4_translate_flush_cache()
45.93 -{
45.94 - void *code = xlat_get_code_by_vma( sh4r.pc );
45.95 - if( code != NULL ) {
45.96 - uint32_t size = xlat_get_code_size( code );
45.97 - void *pc = xlat_get_native_pc( code, size );
45.98 - assert( pc != NULL );
45.99 -
45.100 - xlat_recovery_record_t recover = xlat_get_post_recovery(code, pc, FALSE);
45.101 - if( recover != NULL ) {
45.102 - // Can be null if there is no recovery necessary
45.103 - sh4_translate_run_recovery(recover);
45.104 - xlat_flush_cache();
45.105 - return TRUE;
45.106 - } else {
45.107 - xlat_flush_cache();
45.108 - return FALSE;
45.109 - }
45.110 - }
45.111 -}
45.112 -
45.113 void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
45.114 {
45.115 void *result = NULL;
46.1 --- a/src/sh4/sh4trans.h Mon Dec 15 10:44:56 2008 +0000
46.2 +++ b/src/sh4/sh4trans.h Tue Jan 13 11:56:28 2009 +0000
46.3 @@ -113,6 +113,12 @@
46.4 void sh4_translate_exit_recover( );
46.5
46.6 /**
46.7 + * Called when doing a break out of the translator following a taken exception -
46.8 + * finalizes the system state up to the start of the current instruction.
46.9 + */
46.10 +void sh4_translate_exception_exit_recover( );
46.11 +
46.12 +/**
46.13 * From within the translator, exit the current block at the end of the
46.14 * current instruction, flush the translation cache (completely)
46.15 * @return TRUE to perform a vm-exit/continue after the flush
47.1 --- a/src/sh4/sh4x86.in Mon Dec 15 10:44:56 2008 +0000
47.2 +++ b/src/sh4/sh4x86.in Tue Jan 13 11:56:28 2009 +0000
47.3 @@ -32,6 +32,7 @@
47.4 #include "sh4/sh4stat.h"
47.5 #include "sh4/sh4mmio.h"
47.6 #include "sh4/x86op.h"
47.7 +#include "sh4/mmu.h"
47.8 #include "clock.h"
47.9
47.10 #define DEFAULT_BACKPATCH_SIZE 4096
47.11 @@ -53,7 +54,6 @@
47.12 */
47.13 struct sh4_x86_state {
47.14 int in_delay_slot;
47.15 - gboolean priv_checked; /* true if we've already checked the cpu mode. */
47.16 gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
47.17 gboolean branch_taken; /* true if we branched unconditionally */
47.18 gboolean double_prec; /* true if FPU is in double-precision mode */
47.19 @@ -178,6 +178,7 @@
47.20 OP32(value);
47.21 }
47.22
47.23 +
47.24 /**
47.25 * Load an immediate 64-bit quantity (note: x86-64 only)
47.26 */
47.27 @@ -238,17 +239,15 @@
47.28 /* Exception checks - Note that all exception checks will clobber EAX */
47.29
47.30 #define check_priv( ) \
47.31 - if( !sh4_x86.priv_checked ) { \
47.32 - sh4_x86.priv_checked = TRUE;\
47.33 - load_spreg( R_EAX, R_SR );\
47.34 - AND_imm32_r32( SR_MD, R_EAX );\
47.35 - if( sh4_x86.in_delay_slot ) {\
47.36 - JE_exc( EXC_SLOT_ILLEGAL );\
47.37 - } else {\
47.38 - JE_exc( EXC_ILLEGAL );\
47.39 - }\
47.40 - sh4_x86.tstate = TSTATE_NONE; \
47.41 - }\
47.42 + if( (sh4r.xlat_sh4_mode & SR_MD) == 0 ) { \
47.43 + if( sh4_x86.in_delay_slot ) { \
47.44 + JMP_exc(EXC_SLOT_ILLEGAL); \
47.45 + } else { \
47.46 + JMP_exc(EXC_ILLEGAL ); \
47.47 + } \
47.48 + sh4_x86.in_delay_slot = DELAY_NONE; \
47.49 + return 2; \
47.50 + }
47.51
47.52 #define check_fpuen( ) \
47.53 if( !sh4_x86.fpuen_checked ) {\
47.54 @@ -288,32 +287,33 @@
47.55 JNE_exc(EXC_DATA_ADDR_WRITE);
47.56
47.57 #define UNDEF(ir)
47.58 +#define MEM_REGION_PTR(name) offsetof( struct mem_region_fn, name )
47.59 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
47.60 -#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
47.61 -#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
47.62 -#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
47.63 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
47.64 -#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
47.65 -#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
47.66 +/* Note: For SR.MD == 1 && MMUCR.AT == 0, there are no memory exceptions, so
47.67 + * don't waste the cycles expecting them. Otherwise we need to save the exception pointer.
47.68 + */
47.69 +
47.70 +#ifdef HAVE_FRAME_ADDRESS
47.71 +#define _CALL_READ(addr_reg, fn) if( !sh4_x86.tlb_on && (sh4r.xlat_sh4_mode & SR_MD) ) { \
47.72 + call_func1_r32disp8(R_ECX, MEM_REGION_PTR(fn), addr_reg); } else { \
47.73 + call_func1_r32disp8_exc(R_ECX, MEM_REGION_PTR(fn), addr_reg, pc); }
47.74 +#define _CALL_WRITE(addr_reg, val_reg, fn) if( !sh4_x86.tlb_on && (sh4r.xlat_sh4_mode & SR_MD) ) { \
47.75 + call_func2_r32disp8(R_ECX, MEM_REGION_PTR(fn), addr_reg, val_reg); } else { \
47.76 + call_func2_r32disp8_exc(R_ECX, MEM_REGION_PTR(fn), addr_reg, val_reg, pc); }
47.77 +#else
47.78 +#define _CALL_READ(addr_reg, fn) call_func1_r32disp8(R_ECX, MEM_REGION_PTR(fn), addr_reg)
47.79 +#define _CALL_WRITE(addr_reg, val_reg, fn) call_func2_r32disp8(R_ECX, MEM_REGION_PTR(fn), addr_reg, val_reg)
47.80 +#endif
47.81 +
47.82 +#define MEM_READ_BYTE( addr_reg, value_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, read_byte); MEM_RESULT(value_reg)
47.83 +#define MEM_READ_WORD( addr_reg, value_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, read_word); MEM_RESULT(value_reg)
47.84 +#define MEM_READ_LONG( addr_reg, value_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, read_long); MEM_RESULT(value_reg)
47.85 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_byte)
47.86 +#define MEM_WRITE_WORD( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_word)
47.87 +#define MEM_WRITE_LONG( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_long)
47.88 +#define MEM_PREFETCH( addr_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, prefetch)
47.89
47.90 -#ifdef HAVE_FRAME_ADDRESS
47.91 -/**
47.92 - * Perform MMU translation on the address in addr_reg for a read operation, iff the TLB is turned
47.93 - * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
47.94 - */
47.95 -#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1_exc(mmu_vma_to_phys_read, addr_reg, pc); MEM_RESULT(addr_reg); }
47.96 -
47.97 -/**
47.98 - * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
47.99 - * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
47.100 - */
47.101 -#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1_exc(mmu_vma_to_phys_write, addr_reg, pc); MEM_RESULT(addr_reg); }
47.102 -#else
47.103 -#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
47.104 -#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
47.105 -#endif
47.106 -
47.107 -#define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 1;
47.108 +#define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 2;
47.109
47.110 /****** Import appropriate calling conventions ******/
47.111 #if SIZEOF_VOID_P == 8
47.112 @@ -326,12 +326,11 @@
47.113 {
47.114 enter_block();
47.115 sh4_x86.in_delay_slot = FALSE;
47.116 - sh4_x86.priv_checked = FALSE;
47.117 sh4_x86.fpuen_checked = FALSE;
47.118 sh4_x86.branch_taken = FALSE;
47.119 sh4_x86.backpatch_posn = 0;
47.120 sh4_x86.block_start_pc = pc;
47.121 - sh4_x86.tlb_on = IS_MMU_ENABLED();
47.122 + sh4_x86.tlb_on = IS_TLB_ENABLED();
47.123 sh4_x86.tstate = TSTATE_NONE;
47.124 sh4_x86.double_prec = sh4r.fpscr & FPSCR_PR;
47.125 sh4_x86.double_size = sh4r.fpscr & FPSCR_SZ;
47.126 @@ -424,9 +423,7 @@
47.127 :}
47.128 ADD #imm, Rn {:
47.129 COUNT_INST(I_ADDI);
47.130 - load_reg( R_EAX, Rn );
47.131 - ADD_imm8s_r32( imm, R_EAX );
47.132 - store_reg( R_EAX, Rn );
47.133 + ADD_imm8s_sh4r( imm, REG_OFFSET(r[Rn]) );
47.134 sh4_x86.tstate = TSTATE_NONE;
47.135 :}
47.136 ADDC Rm, Rn {:
47.137 @@ -468,9 +465,7 @@
47.138 AND.B #imm, @(R0, GBR) {:
47.139 COUNT_INST(I_ANDB);
47.140 load_reg( R_EAX, 0 );
47.141 - load_spreg( R_ECX, R_GBR );
47.142 - ADD_r32_r32( R_ECX, R_EAX );
47.143 - MMU_TRANSLATE_WRITE( R_EAX );
47.144 + ADD_sh4r_r32( R_GBR, R_EAX );
47.145 MOV_r32_esp8(R_EAX, 0);
47.146 MEM_READ_BYTE( R_EAX, R_EDX );
47.147 MOV_esp8_r32(0, R_EAX);
47.148 @@ -659,32 +654,25 @@
47.149 if( Rm == Rn ) {
47.150 load_reg( R_EAX, Rm );
47.151 check_ralign32( R_EAX );
47.152 - MMU_TRANSLATE_READ( R_EAX );
47.153 + MEM_READ_LONG( R_EAX, R_EAX );
47.154 MOV_r32_esp8(R_EAX, 0);
47.155 - load_reg( R_EAX, Rn );
47.156 - ADD_imm8s_r32( 4, R_EAX );
47.157 - MMU_TRANSLATE_READ( R_EAX );
47.158 - ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
47.159 - // Note translate twice in case of page boundaries. Maybe worth
47.160 - // adding a page-boundary check to skip the second translation
47.161 + load_reg( R_EAX, Rm );
47.162 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
47.163 + MEM_READ_LONG( R_EAX, R_EAX );
47.164 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
47.165 } else {
47.166 load_reg( R_EAX, Rm );
47.167 check_ralign32( R_EAX );
47.168 - MMU_TRANSLATE_READ( R_EAX );
47.169 + MEM_READ_LONG( R_EAX, R_EAX );
47.170 MOV_r32_esp8( R_EAX, 0 );
47.171 load_reg( R_EAX, Rn );
47.172 check_ralign32( R_EAX );
47.173 - MMU_TRANSLATE_READ( R_EAX );
47.174 + MEM_READ_LONG( R_EAX, R_EAX );
47.175 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
47.176 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.177 }
47.178 - MEM_READ_LONG( R_EAX, R_EAX );
47.179 - MOV_r32_esp8( R_EAX, 4 );
47.180 - MOV_esp8_r32( 0, R_EAX );
47.181 - MEM_READ_LONG( R_EAX, R_EAX );
47.182 - MOV_esp8_r32( 4, R_ECX );
47.183 -
47.184 - IMUL_r32( R_ECX );
47.185 +
47.186 + IMUL_esp8( 0 );
47.187 ADD_r32_sh4r( R_EAX, R_MACL );
47.188 ADC_r32_sh4r( R_EDX, R_MACH );
47.189
47.190 @@ -700,32 +688,26 @@
47.191 if( Rm == Rn ) {
47.192 load_reg( R_EAX, Rm );
47.193 check_ralign16( R_EAX );
47.194 - MMU_TRANSLATE_READ( R_EAX );
47.195 + MEM_READ_WORD( R_EAX, R_EAX );
47.196 MOV_r32_esp8( R_EAX, 0 );
47.197 - load_reg( R_EAX, Rn );
47.198 - ADD_imm8s_r32( 2, R_EAX );
47.199 - MMU_TRANSLATE_READ( R_EAX );
47.200 + load_reg( R_EAX, Rm );
47.201 + LEA_r32disp8_r32( R_EAX, 2, R_EAX );
47.202 + MEM_READ_WORD( R_EAX, R_EAX );
47.203 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
47.204 // Note translate twice in case of page boundaries. Maybe worth
47.205 // adding a page-boundary check to skip the second translation
47.206 } else {
47.207 load_reg( R_EAX, Rm );
47.208 check_ralign16( R_EAX );
47.209 - MMU_TRANSLATE_READ( R_EAX );
47.210 + MEM_READ_WORD( R_EAX, R_EAX );
47.211 MOV_r32_esp8( R_EAX, 0 );
47.212 load_reg( R_EAX, Rn );
47.213 check_ralign16( R_EAX );
47.214 - MMU_TRANSLATE_READ( R_EAX );
47.215 + MEM_READ_WORD( R_EAX, R_EAX );
47.216 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
47.217 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
47.218 }
47.219 - MEM_READ_WORD( R_EAX, R_EAX );
47.220 - MOV_r32_esp8( R_EAX, 4 );
47.221 - MOV_esp8_r32( 0, R_EAX );
47.222 - MEM_READ_WORD( R_EAX, R_EAX );
47.223 - MOV_esp8_r32( 4, R_ECX );
47.224 -
47.225 - IMUL_r32( R_ECX );
47.226 + IMUL_esp8( 0 );
47.227 load_spreg( R_ECX, R_S );
47.228 TEST_r32_r32( R_ECX, R_ECX );
47.229 JE_rel8( nosat );
47.230 @@ -823,9 +805,7 @@
47.231 OR.B #imm, @(R0, GBR) {:
47.232 COUNT_INST(I_ORB);
47.233 load_reg( R_EAX, 0 );
47.234 - load_spreg( R_ECX, R_GBR );
47.235 - ADD_r32_r32( R_ECX, R_EAX );
47.236 - MMU_TRANSLATE_WRITE( R_EAX );
47.237 + ADD_sh4r_r32( R_GBR, R_EAX );
47.238 MOV_r32_esp8( R_EAX, 0 );
47.239 MEM_READ_BYTE( R_EAX, R_EDX );
47.240 MOV_esp8_r32( 0, R_EAX );
47.241 @@ -1044,7 +1024,6 @@
47.242 TAS.B @Rn {:
47.243 COUNT_INST(I_TASB);
47.244 load_reg( R_EAX, Rn );
47.245 - MMU_TRANSLATE_WRITE( R_EAX );
47.246 MOV_r32_esp8( R_EAX, 0 );
47.247 MEM_READ_BYTE( R_EAX, R_EDX );
47.248 TEST_r8_r8( R_DL, R_DL );
47.249 @@ -1072,9 +1051,7 @@
47.250 TST.B #imm, @(R0, GBR) {:
47.251 COUNT_INST(I_TSTB);
47.252 load_reg( R_EAX, 0);
47.253 - load_reg( R_ECX, R_GBR);
47.254 - ADD_r32_r32( R_ECX, R_EAX );
47.255 - MMU_TRANSLATE_READ( R_EAX );
47.256 + ADD_sh4r_r32( R_GBR, R_EAX );
47.257 MEM_READ_BYTE( R_EAX, R_EAX );
47.258 TEST_imm8_r8( imm, R_AL );
47.259 SETE_t();
47.260 @@ -1098,9 +1075,7 @@
47.261 XOR.B #imm, @(R0, GBR) {:
47.262 COUNT_INST(I_XORB);
47.263 load_reg( R_EAX, 0 );
47.264 - load_spreg( R_ECX, R_GBR );
47.265 - ADD_r32_r32( R_ECX, R_EAX );
47.266 - MMU_TRANSLATE_WRITE( R_EAX );
47.267 + ADD_sh4r_r32( R_GBR, R_EAX );
47.268 MOV_r32_esp8( R_EAX, 0 );
47.269 MEM_READ_BYTE(R_EAX, R_EDX);
47.270 MOV_esp8_r32( 0, R_EAX );
47.271 @@ -1133,7 +1108,6 @@
47.272 MOV.B Rm, @Rn {:
47.273 COUNT_INST(I_MOVB);
47.274 load_reg( R_EAX, Rn );
47.275 - MMU_TRANSLATE_WRITE( R_EAX );
47.276 load_reg( R_EDX, Rm );
47.277 MEM_WRITE_BYTE( R_EAX, R_EDX );
47.278 sh4_x86.tstate = TSTATE_NONE;
47.279 @@ -1141,19 +1115,16 @@
47.280 MOV.B Rm, @-Rn {:
47.281 COUNT_INST(I_MOVB);
47.282 load_reg( R_EAX, Rn );
47.283 - ADD_imm8s_r32( -1, R_EAX );
47.284 - MMU_TRANSLATE_WRITE( R_EAX );
47.285 + LEA_r32disp8_r32( R_EAX, -1, R_EAX );
47.286 load_reg( R_EDX, Rm );
47.287 + MEM_WRITE_BYTE( R_EAX, R_EDX );
47.288 ADD_imm8s_sh4r( -1, REG_OFFSET(r[Rn]) );
47.289 - MEM_WRITE_BYTE( R_EAX, R_EDX );
47.290 sh4_x86.tstate = TSTATE_NONE;
47.291 :}
47.292 MOV.B Rm, @(R0, Rn) {:
47.293 COUNT_INST(I_MOVB);
47.294 load_reg( R_EAX, 0 );
47.295 - load_reg( R_ECX, Rn );
47.296 - ADD_r32_r32( R_ECX, R_EAX );
47.297 - MMU_TRANSLATE_WRITE( R_EAX );
47.298 + ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
47.299 load_reg( R_EDX, Rm );
47.300 MEM_WRITE_BYTE( R_EAX, R_EDX );
47.301 sh4_x86.tstate = TSTATE_NONE;
47.302 @@ -1162,7 +1133,6 @@
47.303 COUNT_INST(I_MOVB);
47.304 load_spreg( R_EAX, R_GBR );
47.305 ADD_imm32_r32( disp, R_EAX );
47.306 - MMU_TRANSLATE_WRITE( R_EAX );
47.307 load_reg( R_EDX, 0 );
47.308 MEM_WRITE_BYTE( R_EAX, R_EDX );
47.309 sh4_x86.tstate = TSTATE_NONE;
47.310 @@ -1171,7 +1141,6 @@
47.311 COUNT_INST(I_MOVB);
47.312 load_reg( R_EAX, Rn );
47.313 ADD_imm32_r32( disp, R_EAX );
47.314 - MMU_TRANSLATE_WRITE( R_EAX );
47.315 load_reg( R_EDX, 0 );
47.316 MEM_WRITE_BYTE( R_EAX, R_EDX );
47.317 sh4_x86.tstate = TSTATE_NONE;
47.318 @@ -1179,7 +1148,6 @@
47.319 MOV.B @Rm, Rn {:
47.320 COUNT_INST(I_MOVB);
47.321 load_reg( R_EAX, Rm );
47.322 - MMU_TRANSLATE_READ( R_EAX );
47.323 MEM_READ_BYTE( R_EAX, R_EAX );
47.324 store_reg( R_EAX, Rn );
47.325 sh4_x86.tstate = TSTATE_NONE;
47.326 @@ -1187,18 +1155,17 @@
47.327 MOV.B @Rm+, Rn {:
47.328 COUNT_INST(I_MOVB);
47.329 load_reg( R_EAX, Rm );
47.330 - MMU_TRANSLATE_READ( R_EAX );
47.331 - ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
47.332 MEM_READ_BYTE( R_EAX, R_EAX );
47.333 + if( Rm != Rn ) {
47.334 + ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
47.335 + }
47.336 store_reg( R_EAX, Rn );
47.337 sh4_x86.tstate = TSTATE_NONE;
47.338 :}
47.339 MOV.B @(R0, Rm), Rn {:
47.340 COUNT_INST(I_MOVB);
47.341 load_reg( R_EAX, 0 );
47.342 - load_reg( R_ECX, Rm );
47.343 - ADD_r32_r32( R_ECX, R_EAX );
47.344 - MMU_TRANSLATE_READ( R_EAX )
47.345 + ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
47.346 MEM_READ_BYTE( R_EAX, R_EAX );
47.347 store_reg( R_EAX, Rn );
47.348 sh4_x86.tstate = TSTATE_NONE;
47.349 @@ -1207,7 +1174,6 @@
47.350 COUNT_INST(I_MOVB);
47.351 load_spreg( R_EAX, R_GBR );
47.352 ADD_imm32_r32( disp, R_EAX );
47.353 - MMU_TRANSLATE_READ( R_EAX );
47.354 MEM_READ_BYTE( R_EAX, R_EAX );
47.355 store_reg( R_EAX, 0 );
47.356 sh4_x86.tstate = TSTATE_NONE;
47.357 @@ -1216,7 +1182,6 @@
47.358 COUNT_INST(I_MOVB);
47.359 load_reg( R_EAX, Rm );
47.360 ADD_imm32_r32( disp, R_EAX );
47.361 - MMU_TRANSLATE_READ( R_EAX );
47.362 MEM_READ_BYTE( R_EAX, R_EAX );
47.363 store_reg( R_EAX, 0 );
47.364 sh4_x86.tstate = TSTATE_NONE;
47.365 @@ -1225,9 +1190,18 @@
47.366 COUNT_INST(I_MOVL);
47.367 load_reg( R_EAX, Rn );
47.368 check_walign32(R_EAX);
47.369 - MMU_TRANSLATE_WRITE( R_EAX );
47.370 + MOV_r32_r32( R_EAX, R_ECX );
47.371 + AND_imm32_r32( 0xFC000000, R_ECX );
47.372 + CMP_imm32_r32( 0xE0000000, R_ECX );
47.373 + JNE_rel8( notsq );
47.374 + AND_imm8s_r32( 0x3C, R_EAX );
47.375 + load_reg( R_EDX, Rm );
47.376 + MOV_r32_ebpr32disp32( R_EDX, R_EAX, REG_OFFSET(store_queue) );
47.377 + JMP_rel8(end);
47.378 + JMP_TARGET(notsq);
47.379 load_reg( R_EDX, Rm );
47.380 MEM_WRITE_LONG( R_EAX, R_EDX );
47.381 + JMP_TARGET(end);
47.382 sh4_x86.tstate = TSTATE_NONE;
47.383 :}
47.384 MOV.L Rm, @-Rn {:
47.385 @@ -1235,19 +1209,16 @@
47.386 load_reg( R_EAX, Rn );
47.387 ADD_imm8s_r32( -4, R_EAX );
47.388 check_walign32( R_EAX );
47.389 - MMU_TRANSLATE_WRITE( R_EAX );
47.390 load_reg( R_EDX, Rm );
47.391 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.392 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.393 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.394 sh4_x86.tstate = TSTATE_NONE;
47.395 :}
47.396 MOV.L Rm, @(R0, Rn) {:
47.397 COUNT_INST(I_MOVL);
47.398 load_reg( R_EAX, 0 );
47.399 - load_reg( R_ECX, Rn );
47.400 - ADD_r32_r32( R_ECX, R_EAX );
47.401 + ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
47.402 check_walign32( R_EAX );
47.403 - MMU_TRANSLATE_WRITE( R_EAX );
47.404 load_reg( R_EDX, Rm );
47.405 MEM_WRITE_LONG( R_EAX, R_EDX );
47.406 sh4_x86.tstate = TSTATE_NONE;
47.407 @@ -1257,7 +1228,6 @@
47.408 load_spreg( R_EAX, R_GBR );
47.409 ADD_imm32_r32( disp, R_EAX );
47.410 check_walign32( R_EAX );
47.411 - MMU_TRANSLATE_WRITE( R_EAX );
47.412 load_reg( R_EDX, 0 );
47.413 MEM_WRITE_LONG( R_EAX, R_EDX );
47.414 sh4_x86.tstate = TSTATE_NONE;
47.415 @@ -1267,16 +1237,24 @@
47.416 load_reg( R_EAX, Rn );
47.417 ADD_imm32_r32( disp, R_EAX );
47.418 check_walign32( R_EAX );
47.419 - MMU_TRANSLATE_WRITE( R_EAX );
47.420 + MOV_r32_r32( R_EAX, R_ECX );
47.421 + AND_imm32_r32( 0xFC000000, R_ECX );
47.422 + CMP_imm32_r32( 0xE0000000, R_ECX );
47.423 + JNE_rel8( notsq );
47.424 + AND_imm8s_r32( 0x3C, R_EAX );
47.425 + load_reg( R_EDX, Rm );
47.426 + MOV_r32_ebpr32disp32( R_EDX, R_EAX, REG_OFFSET(store_queue) );
47.427 + JMP_rel8(end);
47.428 + JMP_TARGET(notsq);
47.429 load_reg( R_EDX, Rm );
47.430 MEM_WRITE_LONG( R_EAX, R_EDX );
47.431 + JMP_TARGET(end);
47.432 sh4_x86.tstate = TSTATE_NONE;
47.433 :}
47.434 MOV.L @Rm, Rn {:
47.435 COUNT_INST(I_MOVL);
47.436 load_reg( R_EAX, Rm );
47.437 check_ralign32( R_EAX );
47.438 - MMU_TRANSLATE_READ( R_EAX );
47.439 MEM_READ_LONG( R_EAX, R_EAX );
47.440 store_reg( R_EAX, Rn );
47.441 sh4_x86.tstate = TSTATE_NONE;
47.442 @@ -1285,19 +1263,18 @@
47.443 COUNT_INST(I_MOVL);
47.444 load_reg( R_EAX, Rm );
47.445 check_ralign32( R_EAX );
47.446 - MMU_TRANSLATE_READ( R_EAX );
47.447 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.448 MEM_READ_LONG( R_EAX, R_EAX );
47.449 + if( Rm != Rn ) {
47.450 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.451 + }
47.452 store_reg( R_EAX, Rn );
47.453 sh4_x86.tstate = TSTATE_NONE;
47.454 :}
47.455 MOV.L @(R0, Rm), Rn {:
47.456 COUNT_INST(I_MOVL);
47.457 load_reg( R_EAX, 0 );
47.458 - load_reg( R_ECX, Rm );
47.459 - ADD_r32_r32( R_ECX, R_EAX );
47.460 + ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
47.461 check_ralign32( R_EAX );
47.462 - MMU_TRANSLATE_READ( R_EAX );
47.463 MEM_READ_LONG( R_EAX, R_EAX );
47.464 store_reg( R_EAX, Rn );
47.465 sh4_x86.tstate = TSTATE_NONE;
47.466 @@ -1307,7 +1284,6 @@
47.467 load_spreg( R_EAX, R_GBR );
47.468 ADD_imm32_r32( disp, R_EAX );
47.469 check_ralign32( R_EAX );
47.470 - MMU_TRANSLATE_READ( R_EAX );
47.471 MEM_READ_LONG( R_EAX, R_EAX );
47.472 store_reg( R_EAX, 0 );
47.473 sh4_x86.tstate = TSTATE_NONE;
47.474 @@ -1336,7 +1312,6 @@
47.475 // but we can safely assume that the low bits are the same.
47.476 load_imm32( R_EAX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
47.477 ADD_sh4r_r32( R_PC, R_EAX );
47.478 - MMU_TRANSLATE_READ( R_EAX );
47.479 MEM_READ_LONG( R_EAX, R_EAX );
47.480 sh4_x86.tstate = TSTATE_NONE;
47.481 }
47.482 @@ -1348,7 +1323,6 @@
47.483 load_reg( R_EAX, Rm );
47.484 ADD_imm8s_r32( disp, R_EAX );
47.485 check_ralign32( R_EAX );
47.486 - MMU_TRANSLATE_READ( R_EAX );
47.487 MEM_READ_LONG( R_EAX, R_EAX );
47.488 store_reg( R_EAX, Rn );
47.489 sh4_x86.tstate = TSTATE_NONE;
47.490 @@ -1357,7 +1331,6 @@
47.491 COUNT_INST(I_MOVW);
47.492 load_reg( R_EAX, Rn );
47.493 check_walign16( R_EAX );
47.494 - MMU_TRANSLATE_WRITE( R_EAX )
47.495 load_reg( R_EDX, Rm );
47.496 MEM_WRITE_WORD( R_EAX, R_EDX );
47.497 sh4_x86.tstate = TSTATE_NONE;
47.498 @@ -1365,21 +1338,18 @@
47.499 MOV.W Rm, @-Rn {:
47.500 COUNT_INST(I_MOVW);
47.501 load_reg( R_EAX, Rn );
47.502 - ADD_imm8s_r32( -2, R_EAX );
47.503 check_walign16( R_EAX );
47.504 - MMU_TRANSLATE_WRITE( R_EAX );
47.505 + LEA_r32disp8_r32( R_EAX, -2, R_EAX );
47.506 load_reg( R_EDX, Rm );
47.507 + MEM_WRITE_WORD( R_EAX, R_EDX );
47.508 ADD_imm8s_sh4r( -2, REG_OFFSET(r[Rn]) );
47.509 - MEM_WRITE_WORD( R_EAX, R_EDX );
47.510 sh4_x86.tstate = TSTATE_NONE;
47.511 :}
47.512 MOV.W Rm, @(R0, Rn) {:
47.513 COUNT_INST(I_MOVW);
47.514 load_reg( R_EAX, 0 );
47.515 - load_reg( R_ECX, Rn );
47.516 - ADD_r32_r32( R_ECX, R_EAX );
47.517 + ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
47.518 check_walign16( R_EAX );
47.519 - MMU_TRANSLATE_WRITE( R_EAX );
47.520 load_reg( R_EDX, Rm );
47.521 MEM_WRITE_WORD( R_EAX, R_EDX );
47.522 sh4_x86.tstate = TSTATE_NONE;
47.523 @@ -1389,7 +1359,6 @@
47.524 load_spreg( R_EAX, R_GBR );
47.525 ADD_imm32_r32( disp, R_EAX );
47.526 check_walign16( R_EAX );
47.527 - MMU_TRANSLATE_WRITE( R_EAX );
47.528 load_reg( R_EDX, 0 );
47.529 MEM_WRITE_WORD( R_EAX, R_EDX );
47.530 sh4_x86.tstate = TSTATE_NONE;
47.531 @@ -1399,7 +1368,6 @@
47.532 load_reg( R_EAX, Rn );
47.533 ADD_imm32_r32( disp, R_EAX );
47.534 check_walign16( R_EAX );
47.535 - MMU_TRANSLATE_WRITE( R_EAX );
47.536 load_reg( R_EDX, 0 );
47.537 MEM_WRITE_WORD( R_EAX, R_EDX );
47.538 sh4_x86.tstate = TSTATE_NONE;
47.539 @@ -1408,7 +1376,6 @@
47.540 COUNT_INST(I_MOVW);
47.541 load_reg( R_EAX, Rm );
47.542 check_ralign16( R_EAX );
47.543 - MMU_TRANSLATE_READ( R_EAX );
47.544 MEM_READ_WORD( R_EAX, R_EAX );
47.545 store_reg( R_EAX, Rn );
47.546 sh4_x86.tstate = TSTATE_NONE;
47.547 @@ -1417,19 +1384,18 @@
47.548 COUNT_INST(I_MOVW);
47.549 load_reg( R_EAX, Rm );
47.550 check_ralign16( R_EAX );
47.551 - MMU_TRANSLATE_READ( R_EAX );
47.552 - ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
47.553 MEM_READ_WORD( R_EAX, R_EAX );
47.554 + if( Rm != Rn ) {
47.555 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
47.556 + }
47.557 store_reg( R_EAX, Rn );
47.558 sh4_x86.tstate = TSTATE_NONE;
47.559 :}
47.560 MOV.W @(R0, Rm), Rn {:
47.561 COUNT_INST(I_MOVW);
47.562 load_reg( R_EAX, 0 );
47.563 - load_reg( R_ECX, Rm );
47.564 - ADD_r32_r32( R_ECX, R_EAX );
47.565 + ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
47.566 check_ralign16( R_EAX );
47.567 - MMU_TRANSLATE_READ( R_EAX );
47.568 MEM_READ_WORD( R_EAX, R_EAX );
47.569 store_reg( R_EAX, Rn );
47.570 sh4_x86.tstate = TSTATE_NONE;
47.571 @@ -1439,7 +1405,6 @@
47.572 load_spreg( R_EAX, R_GBR );
47.573 ADD_imm32_r32( disp, R_EAX );
47.574 check_ralign16( R_EAX );
47.575 - MMU_TRANSLATE_READ( R_EAX );
47.576 MEM_READ_WORD( R_EAX, R_EAX );
47.577 store_reg( R_EAX, 0 );
47.578 sh4_x86.tstate = TSTATE_NONE;
47.579 @@ -1458,7 +1423,6 @@
47.580 } else {
47.581 load_imm32( R_EAX, (pc - sh4_x86.block_start_pc) + disp + 4 );
47.582 ADD_sh4r_r32( R_PC, R_EAX );
47.583 - MMU_TRANSLATE_READ( R_EAX );
47.584 MEM_READ_WORD( R_EAX, R_EAX );
47.585 sh4_x86.tstate = TSTATE_NONE;
47.586 }
47.587 @@ -1470,7 +1434,6 @@
47.588 load_reg( R_EAX, Rm );
47.589 ADD_imm32_r32( disp, R_EAX );
47.590 check_ralign16( R_EAX );
47.591 - MMU_TRANSLATE_READ( R_EAX );
47.592 MEM_READ_WORD( R_EAX, R_EAX );
47.593 store_reg( R_EAX, 0 );
47.594 sh4_x86.tstate = TSTATE_NONE;
47.595 @@ -1490,7 +1453,6 @@
47.596 COUNT_INST(I_MOVCA);
47.597 load_reg( R_EAX, Rn );
47.598 check_walign32( R_EAX );
47.599 - MMU_TRANSLATE_WRITE( R_EAX );
47.600 load_reg( R_EDX, 0 );
47.601 MEM_WRITE_LONG( R_EAX, R_EDX );
47.602 sh4_x86.tstate = TSTATE_NONE;
47.603 @@ -1731,7 +1693,6 @@
47.604 load_spreg( R_EAX, R_SSR );
47.605 call_func1( sh4_write_sr, R_EAX );
47.606 sh4_x86.in_delay_slot = DELAY_PC;
47.607 - sh4_x86.priv_checked = FALSE;
47.608 sh4_x86.fpuen_checked = FALSE;
47.609 sh4_x86.tstate = TSTATE_NONE;
47.610 sh4_x86.branch_taken = TRUE;
47.611 @@ -1841,13 +1802,14 @@
47.612 load_reg( R_EAX, Rn );
47.613 if( sh4_x86.double_size ) {
47.614 check_walign64( R_EAX );
47.615 - MMU_TRANSLATE_WRITE( R_EAX );
47.616 load_dr0( R_EDX, FRm );
47.617 - load_dr1( R_ECX, FRm );
47.618 - MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
47.619 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.620 + load_reg( R_EAX, Rn );
47.621 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
47.622 + load_dr1( R_EDX, FRm );
47.623 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.624 } else {
47.625 check_walign32( R_EAX );
47.626 - MMU_TRANSLATE_WRITE( R_EAX );
47.627 load_fr( R_EDX, FRm );
47.628 MEM_WRITE_LONG( R_EAX, R_EDX );
47.629 }
47.630 @@ -1859,13 +1821,14 @@
47.631 load_reg( R_EAX, Rm );
47.632 if( sh4_x86.double_size ) {
47.633 check_ralign64( R_EAX );
47.634 - MMU_TRANSLATE_READ( R_EAX );
47.635 - MEM_READ_DOUBLE( R_EAX, R_EDX, R_EAX );
47.636 - store_dr0( R_EDX, FRn );
47.637 - store_dr1( R_EAX, FRn );
47.638 + MEM_READ_LONG( R_EAX, R_EAX );
47.639 + store_dr0( R_EAX, FRn );
47.640 + load_reg( R_EAX, Rm );
47.641 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
47.642 + MEM_READ_LONG( R_EAX, R_EAX );
47.643 + store_dr1( R_EAX, FRn );
47.644 } else {
47.645 check_ralign32( R_EAX );
47.646 - MMU_TRANSLATE_READ( R_EAX );
47.647 MEM_READ_LONG( R_EAX, R_EAX );
47.648 store_fr( R_EAX, FRn );
47.649 }
47.650 @@ -1877,19 +1840,20 @@
47.651 load_reg( R_EAX, Rn );
47.652 if( sh4_x86.double_size ) {
47.653 check_walign64( R_EAX );
47.654 - ADD_imm8s_r32(-8,R_EAX);
47.655 - MMU_TRANSLATE_WRITE( R_EAX );
47.656 + LEA_r32disp8_r32( R_EAX, -8, R_EAX );
47.657 load_dr0( R_EDX, FRm );
47.658 - load_dr1( R_ECX, FRm );
47.659 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.660 + load_reg( R_EAX, Rn );
47.661 + LEA_r32disp8_r32( R_EAX, -4, R_EAX );
47.662 + load_dr1( R_EDX, FRm );
47.663 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.664 ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
47.665 - MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
47.666 } else {
47.667 check_walign32( R_EAX );
47.668 - ADD_imm8s_r32( -4, R_EAX );
47.669 - MMU_TRANSLATE_WRITE( R_EAX );
47.670 + LEA_r32disp8_r32( R_EAX, -4, R_EAX );
47.671 load_fr( R_EDX, FRm );
47.672 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.673 ADD_imm8s_sh4r(-4,REG_OFFSET(r[Rn]));
47.674 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.675 }
47.676 sh4_x86.tstate = TSTATE_NONE;
47.677 :}
47.678 @@ -1899,17 +1863,18 @@
47.679 load_reg( R_EAX, Rm );
47.680 if( sh4_x86.double_size ) {
47.681 check_ralign64( R_EAX );
47.682 - MMU_TRANSLATE_READ( R_EAX );
47.683 + MEM_READ_LONG( R_EAX, R_EAX );
47.684 + store_dr0( R_EAX, FRn );
47.685 + load_reg( R_EAX, Rm );
47.686 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
47.687 + MEM_READ_LONG( R_EAX, R_EAX );
47.688 + store_dr1( R_EAX, FRn );
47.689 ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
47.690 - MEM_READ_DOUBLE( R_EAX, R_EDX, R_EAX );
47.691 - store_dr0( R_EDX, FRn );
47.692 - store_dr1( R_EAX, FRn );
47.693 } else {
47.694 check_ralign32( R_EAX );
47.695 - MMU_TRANSLATE_READ( R_EAX );
47.696 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.697 MEM_READ_LONG( R_EAX, R_EAX );
47.698 store_fr( R_EAX, FRn );
47.699 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.700 }
47.701 sh4_x86.tstate = TSTATE_NONE;
47.702 :}
47.703 @@ -1920,13 +1885,15 @@
47.704 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
47.705 if( sh4_x86.double_size ) {
47.706 check_walign64( R_EAX );
47.707 - MMU_TRANSLATE_WRITE( R_EAX );
47.708 load_dr0( R_EDX, FRm );
47.709 - load_dr1( R_ECX, FRm );
47.710 - MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
47.711 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.712 + load_reg( R_EAX, Rn );
47.713 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
47.714 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
47.715 + load_dr1( R_EDX, FRm );
47.716 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.717 } else {
47.718 check_walign32( R_EAX );
47.719 - MMU_TRANSLATE_WRITE( R_EAX );
47.720 load_fr( R_EDX, FRm );
47.721 MEM_WRITE_LONG( R_EAX, R_EDX ); // 12
47.722 }
47.723 @@ -1939,13 +1906,15 @@
47.724 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
47.725 if( sh4_x86.double_size ) {
47.726 check_ralign64( R_EAX );
47.727 - MMU_TRANSLATE_READ( R_EAX );
47.728 - MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
47.729 - store_dr0( R_ECX, FRn );
47.730 + MEM_READ_LONG( R_EAX, R_EAX );
47.731 + store_dr0( R_EAX, FRn );
47.732 + load_reg( R_EAX, Rm );
47.733 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
47.734 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
47.735 + MEM_READ_LONG( R_EAX, R_EAX );
47.736 store_dr1( R_EAX, FRn );
47.737 } else {
47.738 check_ralign32( R_EAX );
47.739 - MMU_TRANSLATE_READ( R_EAX );
47.740 MEM_READ_LONG( R_EAX, R_EAX );
47.741 store_fr( R_EAX, FRn );
47.742 }
47.743 @@ -2280,18 +2249,15 @@
47.744 FRCHG {:
47.745 COUNT_INST(I_FRCHG);
47.746 check_fpuen();
47.747 - load_spreg( R_ECX, R_FPSCR );
47.748 - XOR_imm32_r32( FPSCR_FR, R_ECX );
47.749 - store_spreg( R_ECX, R_FPSCR );
47.750 + XOR_imm32_sh4r( FPSCR_FR, R_FPSCR );
47.751 call_func0( sh4_switch_fr_banks );
47.752 sh4_x86.tstate = TSTATE_NONE;
47.753 :}
47.754 FSCHG {:
47.755 COUNT_INST(I_FSCHG);
47.756 check_fpuen();
47.757 - load_spreg( R_ECX, R_FPSCR );
47.758 - XOR_imm32_r32( FPSCR_SZ, R_ECX );
47.759 - store_spreg( R_ECX, R_FPSCR );
47.760 + XOR_imm32_sh4r( FPSCR_SZ, R_FPSCR);
47.761 + XOR_imm32_sh4r( FPSCR_SZ, REG_OFFSET(xlat_sh4_mode) );
47.762 sh4_x86.tstate = TSTATE_NONE;
47.763 sh4_x86.double_size = !sh4_x86.double_size;
47.764 :}
47.765 @@ -2305,9 +2271,9 @@
47.766 check_priv();
47.767 load_reg( R_EAX, Rm );
47.768 call_func1( sh4_write_sr, R_EAX );
47.769 - sh4_x86.priv_checked = FALSE;
47.770 sh4_x86.fpuen_checked = FALSE;
47.771 sh4_x86.tstate = TSTATE_NONE;
47.772 + return 2;
47.773 }
47.774 :}
47.775 LDC Rm, GBR {:
47.776 @@ -2361,9 +2327,8 @@
47.777 COUNT_INST(I_LDCM);
47.778 load_reg( R_EAX, Rm );
47.779 check_ralign32( R_EAX );
47.780 - MMU_TRANSLATE_READ( R_EAX );
47.781 + MEM_READ_LONG( R_EAX, R_EAX );
47.782 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.783 - MEM_READ_LONG( R_EAX, R_EAX );
47.784 store_spreg( R_EAX, R_GBR );
47.785 sh4_x86.tstate = TSTATE_NONE;
47.786 :}
47.787 @@ -2375,13 +2340,12 @@
47.788 check_priv();
47.789 load_reg( R_EAX, Rm );
47.790 check_ralign32( R_EAX );
47.791 - MMU_TRANSLATE_READ( R_EAX );
47.792 + MEM_READ_LONG( R_EAX, R_EAX );
47.793 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.794 - MEM_READ_LONG( R_EAX, R_EAX );
47.795 call_func1( sh4_write_sr, R_EAX );
47.796 - sh4_x86.priv_checked = FALSE;
47.797 sh4_x86.fpuen_checked = FALSE;
47.798 sh4_x86.tstate = TSTATE_NONE;
47.799 + return 2;
47.800 }
47.801 :}
47.802 LDC.L @Rm+, VBR {:
47.803 @@ -2389,9 +2353,8 @@
47.804 check_priv();
47.805 load_reg( R_EAX, Rm );
47.806 check_ralign32( R_EAX );
47.807 - MMU_TRANSLATE_READ( R_EAX );
47.808 + MEM_READ_LONG( R_EAX, R_EAX );
47.809 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.810 - MEM_READ_LONG( R_EAX, R_EAX );
47.811 store_spreg( R_EAX, R_VBR );
47.812 sh4_x86.tstate = TSTATE_NONE;
47.813 :}
47.814 @@ -2400,9 +2363,8 @@
47.815 check_priv();
47.816 load_reg( R_EAX, Rm );
47.817 check_ralign32( R_EAX );
47.818 - MMU_TRANSLATE_READ( R_EAX );
47.819 + MEM_READ_LONG( R_EAX, R_EAX );
47.820 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.821 - MEM_READ_LONG( R_EAX, R_EAX );
47.822 store_spreg( R_EAX, R_SSR );
47.823 sh4_x86.tstate = TSTATE_NONE;
47.824 :}
47.825 @@ -2411,9 +2373,8 @@
47.826 check_priv();
47.827 load_reg( R_EAX, Rm );
47.828 check_ralign32( R_EAX );
47.829 - MMU_TRANSLATE_READ( R_EAX );
47.830 + MEM_READ_LONG( R_EAX, R_EAX );
47.831 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.832 - MEM_READ_LONG( R_EAX, R_EAX );
47.833 store_spreg( R_EAX, R_SGR );
47.834 sh4_x86.tstate = TSTATE_NONE;
47.835 :}
47.836 @@ -2422,9 +2383,8 @@
47.837 check_priv();
47.838 load_reg( R_EAX, Rm );
47.839 check_ralign32( R_EAX );
47.840 - MMU_TRANSLATE_READ( R_EAX );
47.841 + MEM_READ_LONG( R_EAX, R_EAX );
47.842 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.843 - MEM_READ_LONG( R_EAX, R_EAX );
47.844 store_spreg( R_EAX, R_SPC );
47.845 sh4_x86.tstate = TSTATE_NONE;
47.846 :}
47.847 @@ -2433,9 +2393,8 @@
47.848 check_priv();
47.849 load_reg( R_EAX, Rm );
47.850 check_ralign32( R_EAX );
47.851 - MMU_TRANSLATE_READ( R_EAX );
47.852 + MEM_READ_LONG( R_EAX, R_EAX );
47.853 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.854 - MEM_READ_LONG( R_EAX, R_EAX );
47.855 store_spreg( R_EAX, R_DBR );
47.856 sh4_x86.tstate = TSTATE_NONE;
47.857 :}
47.858 @@ -2444,9 +2403,8 @@
47.859 check_priv();
47.860 load_reg( R_EAX, Rm );
47.861 check_ralign32( R_EAX );
47.862 - MMU_TRANSLATE_READ( R_EAX );
47.863 + MEM_READ_LONG( R_EAX, R_EAX );
47.864 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.865 - MEM_READ_LONG( R_EAX, R_EAX );
47.866 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
47.867 sh4_x86.tstate = TSTATE_NONE;
47.868 :}
47.869 @@ -2463,9 +2421,8 @@
47.870 check_fpuen();
47.871 load_reg( R_EAX, Rm );
47.872 check_ralign32( R_EAX );
47.873 - MMU_TRANSLATE_READ( R_EAX );
47.874 + MEM_READ_LONG( R_EAX, R_EAX );
47.875 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.876 - MEM_READ_LONG( R_EAX, R_EAX );
47.877 call_func1( sh4_write_fpscr, R_EAX );
47.878 sh4_x86.tstate = TSTATE_NONE;
47.879 return 2;
47.880 @@ -2481,9 +2438,8 @@
47.881 check_fpuen();
47.882 load_reg( R_EAX, Rm );
47.883 check_ralign32( R_EAX );
47.884 - MMU_TRANSLATE_READ( R_EAX );
47.885 + MEM_READ_LONG( R_EAX, R_EAX );
47.886 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.887 - MEM_READ_LONG( R_EAX, R_EAX );
47.888 store_spreg( R_EAX, R_FPUL );
47.889 sh4_x86.tstate = TSTATE_NONE;
47.890 :}
47.891 @@ -2496,9 +2452,8 @@
47.892 COUNT_INST(I_LDSM);
47.893 load_reg( R_EAX, Rm );
47.894 check_ralign32( R_EAX );
47.895 - MMU_TRANSLATE_READ( R_EAX );
47.896 + MEM_READ_LONG( R_EAX, R_EAX );
47.897 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.898 - MEM_READ_LONG( R_EAX, R_EAX );
47.899 store_spreg( R_EAX, R_MACH );
47.900 sh4_x86.tstate = TSTATE_NONE;
47.901 :}
47.902 @@ -2511,9 +2466,8 @@
47.903 COUNT_INST(I_LDSM);
47.904 load_reg( R_EAX, Rm );
47.905 check_ralign32( R_EAX );
47.906 - MMU_TRANSLATE_READ( R_EAX );
47.907 + MEM_READ_LONG( R_EAX, R_EAX );
47.908 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.909 - MEM_READ_LONG( R_EAX, R_EAX );
47.910 store_spreg( R_EAX, R_MACL );
47.911 sh4_x86.tstate = TSTATE_NONE;
47.912 :}
47.913 @@ -2526,9 +2480,8 @@
47.914 COUNT_INST(I_LDSM);
47.915 load_reg( R_EAX, Rm );
47.916 check_ralign32( R_EAX );
47.917 - MMU_TRANSLATE_READ( R_EAX );
47.918 + MEM_READ_LONG( R_EAX, R_EAX );
47.919 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
47.920 - MEM_READ_LONG( R_EAX, R_EAX );
47.921 store_spreg( R_EAX, R_PR );
47.922 sh4_x86.tstate = TSTATE_NONE;
47.923 :}
47.924 @@ -2549,18 +2502,7 @@
47.925 PREF @Rn {:
47.926 COUNT_INST(I_PREF);
47.927 load_reg( R_EAX, Rn );
47.928 - MOV_r32_r32( R_EAX, R_ECX );
47.929 - AND_imm32_r32( 0xFC000000, R_ECX );
47.930 - CMP_imm32_r32( 0xE0000000, R_ECX );
47.931 - JNE_rel8(end);
47.932 - if( sh4_x86.tlb_on ) {
47.933 - call_func1( sh4_flush_store_queue_mmu, R_EAX );
47.934 - TEST_r32_r32( R_EAX, R_EAX );
47.935 - JE_exc(-1);
47.936 - } else {
47.937 - call_func1( sh4_flush_store_queue, R_EAX );
47.938 - }
47.939 - JMP_TARGET(end);
47.940 + MEM_PREFETCH( R_EAX );
47.941 sh4_x86.tstate = TSTATE_NONE;
47.942 :}
47.943 SLEEP {:
47.944 @@ -2628,16 +2570,13 @@
47.945 STC.L SR, @-Rn {:
47.946 COUNT_INST(I_STCSRM);
47.947 check_priv();
47.948 + call_func0( sh4_read_sr );
47.949 + MOV_r32_r32( R_EAX, R_EDX );
47.950 load_reg( R_EAX, Rn );
47.951 check_walign32( R_EAX );
47.952 - ADD_imm8s_r32( -4, R_EAX );
47.953 - MMU_TRANSLATE_WRITE( R_EAX );
47.954 - MOV_r32_esp8( R_EAX, 0 );
47.955 - call_func0( sh4_read_sr );
47.956 - MOV_r32_r32( R_EAX, R_EDX );
47.957 - MOV_esp8_r32( 0, R_EAX );
47.958 + LEA_r32disp8_r32( R_EAX, -4, R_EAX );
47.959 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.960 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.961 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.962 sh4_x86.tstate = TSTATE_NONE;
47.963 :}
47.964 STC.L VBR, @-Rn {:
47.965 @@ -2646,10 +2585,9 @@
47.966 load_reg( R_EAX, Rn );
47.967 check_walign32( R_EAX );
47.968 ADD_imm8s_r32( -4, R_EAX );
47.969 - MMU_TRANSLATE_WRITE( R_EAX );
47.970 load_spreg( R_EDX, R_VBR );
47.971 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.972 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.973 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.974 sh4_x86.tstate = TSTATE_NONE;
47.975 :}
47.976 STC.L SSR, @-Rn {:
47.977 @@ -2658,10 +2596,9 @@
47.978 load_reg( R_EAX, Rn );
47.979 check_walign32( R_EAX );
47.980 ADD_imm8s_r32( -4, R_EAX );
47.981 - MMU_TRANSLATE_WRITE( R_EAX );
47.982 load_spreg( R_EDX, R_SSR );
47.983 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.984 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.985 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.986 sh4_x86.tstate = TSTATE_NONE;
47.987 :}
47.988 STC.L SPC, @-Rn {:
47.989 @@ -2670,10 +2607,9 @@
47.990 load_reg( R_EAX, Rn );
47.991 check_walign32( R_EAX );
47.992 ADD_imm8s_r32( -4, R_EAX );
47.993 - MMU_TRANSLATE_WRITE( R_EAX );
47.994 load_spreg( R_EDX, R_SPC );
47.995 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.996 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.997 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.998 sh4_x86.tstate = TSTATE_NONE;
47.999 :}
47.1000 STC.L SGR, @-Rn {:
47.1001 @@ -2682,10 +2618,9 @@
47.1002 load_reg( R_EAX, Rn );
47.1003 check_walign32( R_EAX );
47.1004 ADD_imm8s_r32( -4, R_EAX );
47.1005 - MMU_TRANSLATE_WRITE( R_EAX );
47.1006 load_spreg( R_EDX, R_SGR );
47.1007 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1008 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1009 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1010 sh4_x86.tstate = TSTATE_NONE;
47.1011 :}
47.1012 STC.L DBR, @-Rn {:
47.1013 @@ -2694,10 +2629,9 @@
47.1014 load_reg( R_EAX, Rn );
47.1015 check_walign32( R_EAX );
47.1016 ADD_imm8s_r32( -4, R_EAX );
47.1017 - MMU_TRANSLATE_WRITE( R_EAX );
47.1018 load_spreg( R_EDX, R_DBR );
47.1019 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1020 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1021 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1022 sh4_x86.tstate = TSTATE_NONE;
47.1023 :}
47.1024 STC.L Rm_BANK, @-Rn {:
47.1025 @@ -2706,10 +2640,9 @@
47.1026 load_reg( R_EAX, Rn );
47.1027 check_walign32( R_EAX );
47.1028 ADD_imm8s_r32( -4, R_EAX );
47.1029 - MMU_TRANSLATE_WRITE( R_EAX );
47.1030 load_spreg( R_EDX, REG_OFFSET(r_bank[Rm_BANK]) );
47.1031 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1032 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1033 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1034 sh4_x86.tstate = TSTATE_NONE;
47.1035 :}
47.1036 STC.L GBR, @-Rn {:
47.1037 @@ -2717,10 +2650,9 @@
47.1038 load_reg( R_EAX, Rn );
47.1039 check_walign32( R_EAX );
47.1040 ADD_imm8s_r32( -4, R_EAX );
47.1041 - MMU_TRANSLATE_WRITE( R_EAX );
47.1042 load_spreg( R_EDX, R_GBR );
47.1043 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1044 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1045 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1046 sh4_x86.tstate = TSTATE_NONE;
47.1047 :}
47.1048 STS FPSCR, Rn {:
47.1049 @@ -2735,10 +2667,9 @@
47.1050 load_reg( R_EAX, Rn );
47.1051 check_walign32( R_EAX );
47.1052 ADD_imm8s_r32( -4, R_EAX );
47.1053 - MMU_TRANSLATE_WRITE( R_EAX );
47.1054 load_spreg( R_EDX, R_FPSCR );
47.1055 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1056 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1057 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1058 sh4_x86.tstate = TSTATE_NONE;
47.1059 :}
47.1060 STS FPUL, Rn {:
47.1061 @@ -2753,10 +2684,9 @@
47.1062 load_reg( R_EAX, Rn );
47.1063 check_walign32( R_EAX );
47.1064 ADD_imm8s_r32( -4, R_EAX );
47.1065 - MMU_TRANSLATE_WRITE( R_EAX );
47.1066 load_spreg( R_EDX, R_FPUL );
47.1067 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1068 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1069 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1070 sh4_x86.tstate = TSTATE_NONE;
47.1071 :}
47.1072 STS MACH, Rn {:
47.1073 @@ -2769,10 +2699,9 @@
47.1074 load_reg( R_EAX, Rn );
47.1075 check_walign32( R_EAX );
47.1076 ADD_imm8s_r32( -4, R_EAX );
47.1077 - MMU_TRANSLATE_WRITE( R_EAX );
47.1078 load_spreg( R_EDX, R_MACH );
47.1079 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1080 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1081 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1082 sh4_x86.tstate = TSTATE_NONE;
47.1083 :}
47.1084 STS MACL, Rn {:
47.1085 @@ -2785,10 +2714,9 @@
47.1086 load_reg( R_EAX, Rn );
47.1087 check_walign32( R_EAX );
47.1088 ADD_imm8s_r32( -4, R_EAX );
47.1089 - MMU_TRANSLATE_WRITE( R_EAX );
47.1090 load_spreg( R_EDX, R_MACL );
47.1091 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1092 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1093 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1094 sh4_x86.tstate = TSTATE_NONE;
47.1095 :}
47.1096 STS PR, Rn {:
47.1097 @@ -2801,10 +2729,9 @@
47.1098 load_reg( R_EAX, Rn );
47.1099 check_walign32( R_EAX );
47.1100 ADD_imm8s_r32( -4, R_EAX );
47.1101 - MMU_TRANSLATE_WRITE( R_EAX );
47.1102 load_spreg( R_EDX, R_PR );
47.1103 + MEM_WRITE_LONG( R_EAX, R_EDX );
47.1104 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
47.1105 - MEM_WRITE_LONG( R_EAX, R_EDX );
47.1106 sh4_x86.tstate = TSTATE_NONE;
47.1107 :}
47.1108
48.1 --- a/src/sh4/timer.c Mon Dec 15 10:44:56 2008 +0000
48.2 +++ b/src/sh4/timer.c Tue Jan 13 11:56:28 2009 +0000
48.3 @@ -44,9 +44,9 @@
48.4 uint32_t sh4_bus_period = 2* 1000 / SH4_BASE_RATE;
48.5 uint32_t sh4_peripheral_period = 4 * 2000 / SH4_BASE_RATE;
48.6
48.7 -int32_t mmio_region_CPG_read( uint32_t reg )
48.8 +MMIO_REGION_READ_FN( CPG, reg )
48.9 {
48.10 - return MMIO_READ( CPG, reg );
48.11 + return MMIO_READ( CPG, reg&0xFFF );
48.12 }
48.13
48.14 /* CPU + bus dividers (note officially only the first 6 values are valid) */
48.15 @@ -54,11 +54,11 @@
48.16 /* Peripheral clock dividers (only first 5 are officially valid) */
48.17 int pfc_divider[8] = { 2, 3, 4, 6, 8, 8, 8, 8 };
48.18
48.19 -void mmio_region_CPG_write( uint32_t reg, uint32_t val )
48.20 +MMIO_REGION_WRITE_FN( CPG, reg, val )
48.21 {
48.22 uint32_t div;
48.23 uint32_t primary_clock = sh4_input_freq;
48.24 -
48.25 + reg &= 0xFFF;
48.26 switch( reg ) {
48.27 case FRQCR: /* Frequency control */
48.28 if( (val & FRQCR_PLL1EN) == 0 )
48.29 @@ -98,14 +98,14 @@
48.30
48.31 uint32_t rtc_output_period;
48.32
48.33 -int32_t mmio_region_RTC_read( uint32_t reg )
48.34 +MMIO_REGION_READ_FN( RTC, reg )
48.35 {
48.36 - return MMIO_READ( RTC, reg );
48.37 + return MMIO_READ( RTC, reg &0xFFF );
48.38 }
48.39
48.40 -void mmio_region_RTC_write( uint32_t reg, uint32_t val )
48.41 +MMIO_REGION_WRITE_FN( RTC, reg, val )
48.42 {
48.43 - MMIO_WRITE( RTC, reg, val );
48.44 + MMIO_WRITE( RTC, reg &0xFFF, val );
48.45 }
48.46
48.47 /********************************** TMU *************************************/
48.48 @@ -140,22 +140,6 @@
48.49
48.50 static struct TMU_timer TMU_timers[3];
48.51
48.52 -int32_t mmio_region_TMU_read( uint32_t reg )
48.53 -{
48.54 - switch( reg ) {
48.55 - case TCNT0:
48.56 - TMU_count( 0, sh4r.slice_cycle );
48.57 - break;
48.58 - case TCNT1:
48.59 - TMU_count( 1, sh4r.slice_cycle );
48.60 - break;
48.61 - case TCNT2:
48.62 - TMU_count( 2, sh4r.slice_cycle );
48.63 - break;
48.64 - }
48.65 - return MMIO_READ( TMU, reg );
48.66 -}
48.67 -
48.68 void TMU_set_timer_control( int timer, int tcr )
48.69 {
48.70 uint32_t period = 1;
48.71 @@ -261,10 +245,28 @@
48.72 return value;
48.73 }
48.74
48.75 -void mmio_region_TMU_write( uint32_t reg, uint32_t val )
48.76 +MMIO_REGION_READ_FN( TMU, reg )
48.77 +{
48.78 + reg &= 0xFFF;
48.79 + switch( reg ) {
48.80 + case TCNT0:
48.81 + TMU_count( 0, sh4r.slice_cycle );
48.82 + break;
48.83 + case TCNT1:
48.84 + TMU_count( 1, sh4r.slice_cycle );
48.85 + break;
48.86 + case TCNT2:
48.87 + TMU_count( 2, sh4r.slice_cycle );
48.88 + break;
48.89 + }
48.90 + return MMIO_READ( TMU, reg );
48.91 +}
48.92 +
48.93 +MMIO_REGION_WRITE_FN( TMU, reg, val )
48.94 {
48.95 uint32_t oldval;
48.96 int i;
48.97 + reg &= 0xFFF;
48.98 switch( reg ) {
48.99 case TSTR:
48.100 oldval = MMIO_READ( TMU, TSTR );
49.1 --- a/src/sh4/x86op.h Mon Dec 15 10:44:56 2008 +0000
49.2 +++ b/src/sh4/x86op.h Tue Jan 13 11:56:28 2009 +0000
49.3 @@ -55,12 +55,18 @@
49.4 #define LEA_sh4r_rptr(disp, r1) REXW(); LEA_sh4r_r32(disp,r1)
49.5 #define MOV_moffptr_EAX(offptr) REXW(); MOV_moff32_EAX( offptr )
49.6 #define load_exc_backpatch( x86reg ) REXW(); OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP64( 0 )
49.7 +#define MOV_backpatch_esp8( disp ) REXW(); OP(0xC7); MODRM_r32_esp8(0, disp); sh4_x86_add_backpatch( xlat_output, pc, -2); OP64(0)
49.8 +
49.9 +/* imm64 operations are only defined for x86-64 */
49.10 +#define MOV_imm64_r32(i64,r1) REXW(); OP(0xB8+r1); OP64(i64)
49.11 +
49.12 #else /* 32-bit system */
49.13 #define OPPTR(x) OP32((uint32_t)(x))
49.14 #define AND_imm8s_rptr(imm, r1) AND_imm8s_r32( imm, r1 )
49.15 #define LEA_sh4r_rptr(disp, r1) LEA_sh4r_r32(disp,r1)
49.16 #define MOV_moffptr_EAX(offptr) MOV_moff32_EAX( offptr )
49.17 #define load_exc_backpatch( x86reg ) OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP32( 0 )
49.18 +#define MOV_backpatch_esp8( disp ) OP(0xC7); MODRM_r32_esp8(0, disp); sh4_x86_add_backpatch( xlat_output, pc, -2); OP32(0)
49.19 #endif
49.20 #define STACK_ALIGN 16
49.21 #define POP_r32(r1) OP(0x58 + r1)
49.22 @@ -112,11 +118,14 @@
49.23 /* ebp+disp32 modrm form */
49.24 #define MODRM_r32_ebp32(r1,disp) OP(0x85 | (r1<<3)); OP32(disp)
49.25
49.26 -/* esp+disp32 modrm+sib form */
49.27 +/* esp+disp8 modrm+sib form */
49.28 #define MODRM_r32_esp8(r1,disp) OP(0x44 | (r1<<3)); OP(0x24); OP(disp)
49.29
49.30 #define MODRM_r32_sh4r(r1,disp) if(disp>127){ MODRM_r32_ebp32(r1,disp);}else{ MODRM_r32_ebp8(r1,(unsigned char)disp); }
49.31
49.32 +/* Absolute displacement (no base) */
49.33 +#define MODRM_r32_disp32(r1,disp) OP(0x05 | (r1<<3)); OP32(disp)
49.34 +
49.35 #define REXW() OP(0x48)
49.36
49.37 /* Major opcodes */
49.38 @@ -133,9 +142,13 @@
49.39 #define AND_r32_r32(r1,r2) OP(0x23); MODRM_rm32_r32(r1,r2)
49.40 #define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)
49.41 #define AND_imm8s_r32(imm8,r1) OP(0x83); MODRM_rm32_r32(r1,4); OP(imm8)
49.42 +#define AND_imm8s_sh4r(imm8,disp) OP(0x83); MODRM_r32_sh4r(4,disp); OP(imm8)
49.43 #define AND_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,4); OP32(imm)
49.44 +#define AND_sh4r_r32(disp,r1) OP(0x23); MODRM_r32_sh4r(r1, disp)
49.45 #define CALL_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,2)
49.46 -#define CALL_ptr(ptr) OP(0xE8); OP32( (((char *)ptr) - (char *)xlat_output) - 4)
49.47 +#define CALL_ptr(ptr) OP(0xE8); OP32( (((char *)ptr) - (char *)xlat_output) - 4)
49.48 +#define CALL_sh4r(disp) OP(0xFF); MODRM_r32_sh4r(2, disp)
49.49 +#define CALL_r32disp8(r1,disp) OP(0xFF); OP(0x50 + r1); OP(disp)
49.50 #define CLC() OP(0xF8)
49.51 #define CMC() OP(0xF5)
49.52 #define CMP_sh4r_r32(disp,r1) OP(0x3B); MODRM_r32_sh4r(r1,disp)
49.53 @@ -145,22 +158,31 @@
49.54 #define CMP_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(7,disp) OP(imm)
49.55 #define DEC_r32(r1) OP(0x48+r1)
49.56 #define IMUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,5)
49.57 +#define IMUL_esp8(disp) OP(0xF7); MODRM_r32_esp8(5,disp)
49.58 #define INC_r32(r1) OP(0x40+r1)
49.59 #define JMP_rel8(label) OP(0xEB); MARK_JMP8(label); OP(-1);
49.60 +#define JMP_r32disp8(r1,disp) OP(0xFF); OP(0x60 + r1); OP(disp)
49.61 #define LEA_sh4r_r32(disp,r1) OP(0x8D); MODRM_r32_sh4r(r1,disp)
49.62 #define LEA_r32disp8_r32(r1, disp, r2) OP(0x8D); OP( 0x40 + (r2<<3) + r1); OP(disp)
49.63 +#define MOV_imm32_r32(i32,r1) OP(0xB8+r1); OP32(i32)
49.64 #define MOV_r32_r32(r1,r2) OP(0x89); MODRM_r32_rm32(r1,r2)
49.65 #define MOV_r32_sh4r(r1,disp) OP(0x89); MODRM_r32_sh4r(r1,disp)
49.66 #define MOV_moff32_EAX(off) OP(0xA1); OPPTR(off)
49.67 #define MOV_sh4r_r32(disp, r1) OP(0x8B); MODRM_r32_sh4r(r1,disp)
49.68 #define MOV_r32_r32ind(r2,r1) OP(0x89); OP(0 + (r2<<3) + r1 )
49.69 #define MOV_r32ind_r32(r1,r2) OP(0x8B); OP(0 + (r2<<3) + r1 )
49.70 +#define MOV_r32_r32disp32(r2,r1,disp) OP(0x89); OP(0x80 + (r2<<3) + r1); OP32(disp)
49.71 +#define MOV_r32_ebpr32disp32(r2,r1,disp) OP(0x89); OP(0x84 + (r2<<3)); OP(0x05 + (r1<<3)); OP32(disp)
49.72 +#define MOV_r32disp32_r32(r1,disp,r2) OP(0x8B); OP(0x80 + (r2<<3) + r1); OP32(disp)
49.73 +#define MOV_r32disp32x4_r32(r1,disp,r2) OP(0x8B); OP(0x04 + (r2<<3)); OP(0x85+(r1<<3)); OP32(disp)
49.74 #define MOV_r32_esp8(r1,disp) OP(0x89); MODRM_r32_esp8(r1,disp)
49.75 #define MOV_esp8_r32(disp,r1) OP(0x8B); MODRM_r32_esp8(r1,disp)
49.76 #define MOVSX_r8_r32(r1,r2) OP(0x0F); OP(0xBE); MODRM_rm32_r32(r1,r2)
49.77 #define MOVSX_r16_r32(r1,r2) OP(0x0F); OP(0xBF); MODRM_rm32_r32(r1,r2)
49.78 #define MOVZX_r8_r32(r1,r2) OP(0x0F); OP(0xB6); MODRM_rm32_r32(r1,r2)
49.79 #define MOVZX_r16_r32(r1,r2) OP(0x0F); OP(0xB7); MODRM_rm32_r32(r1,r2)
49.80 +#define MOVZX_sh4r8_r32(disp,r1) OP(0x0F); OP(0xB6); MODRM_r32_sh4r(r1,disp)
49.81 +#define MOVZX_sh4r16_r32(disp,r1) OP(0x0F); OP(0xB7); MODRM_r32_sh4r(r1,disp)
49.82 #define MUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,4)
49.83 #define NEG_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,3)
49.84 #define NOT_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,2)
49.85 @@ -197,6 +219,7 @@
49.86 #define XOR_r32_r32(r1,r2) OP(0x33); MODRM_rm32_r32(r1,r2)
49.87 #define XOR_sh4r_r32(disp,r1) OP(0x33); MODRM_r32_sh4r(r1,disp)
49.88 #define XOR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,6); OP32(imm)
49.89 +#define XOR_imm32_sh4r(imm,disp) OP(0x81); MODRM_r32_sh4r(6, disp); OP32(imm)
49.90
49.91
49.92 /* Floating point ops */
49.93 @@ -266,6 +289,7 @@
49.94 #define JNC_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
49.95 #define JNO_exc(exc) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
49.96
49.97 +#define EXPJE_rel8(label) OP(0x3E); JE_rel8(label)
49.98
49.99 /* Conditional moves ebp-rel */
49.100 #define CMOVE_r32_r32(r1,r2) OP(0x0F); OP(0x44); MODRM_rm32_r32(r1,r2)
50.1 --- a/src/sh4/xltcache.c Mon Dec 15 10:44:56 2008 +0000
50.2 +++ b/src/sh4/xltcache.c Tue Jan 13 11:56:28 2009 +0000
50.3 @@ -59,7 +59,7 @@
50.4 xlat_cache_block_t xlat_old_cache_ptr;
50.5 #endif
50.6
50.7 -static void ***xlat_lut;
50.8 +static void **xlat_lut[XLAT_LUT_PAGES];
50.9 static gboolean xlat_initialized = FALSE;
50.10
50.11 void xlat_cache_init(void)
50.12 @@ -78,8 +78,8 @@
50.13 xlat_temp_cache_ptr = xlat_temp_cache;
50.14 xlat_old_cache_ptr = xlat_old_cache;
50.15 #endif
50.16 - xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
50.17 - MAP_PRIVATE|MAP_ANON, -1, 0);
50.18 +// xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
50.19 +// MAP_PRIVATE|MAP_ANON, -1, 0);
50.20 memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
50.21 }
50.22 xlat_flush_cache();
50.23 @@ -132,26 +132,22 @@
50.24
50.25 void FASTCALL xlat_invalidate_word( sh4addr_t addr )
50.26 {
50.27 - if( xlat_lut ) {
50.28 - void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
50.29 - if( page != NULL ) {
50.30 - int entry = XLAT_LUT_ENTRY(addr);
50.31 - if( page[entry] != NULL ) {
50.32 - xlat_flush_page_by_lut(page);
50.33 - }
50.34 + void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
50.35 + if( page != NULL ) {
50.36 + int entry = XLAT_LUT_ENTRY(addr);
50.37 + if( page[entry] != NULL ) {
50.38 + xlat_flush_page_by_lut(page);
50.39 }
50.40 }
50.41 }
50.42
50.43 void FASTCALL xlat_invalidate_long( sh4addr_t addr )
50.44 {
50.45 - if( xlat_lut ) {
50.46 - void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
50.47 - if( page != NULL ) {
50.48 - int entry = XLAT_LUT_ENTRY(addr);
50.49 - if( page[entry] != NULL || page[entry+1] != NULL ) {
50.50 - xlat_flush_page_by_lut(page);
50.51 - }
50.52 + void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
50.53 + if( page != NULL ) {
50.54 + int entry = XLAT_LUT_ENTRY(addr);
50.55 + if( *(uint64_t *)&page[entry] != 0 ) {
50.56 + xlat_flush_page_by_lut(page);
50.57 }
50.58 }
50.59 }
50.60 @@ -162,32 +158,30 @@
50.61 int entry_count = size >> 1; // words;
50.62 uint32_t page_no = XLAT_LUT_PAGE(address);
50.63 int entry = XLAT_LUT_ENTRY(address);
50.64 - if( xlat_lut ) {
50.65 - do {
50.66 - void **page = xlat_lut[page_no];
50.67 - int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
50.68 - if( entry_count < page_entries ) {
50.69 - page_entries = entry_count;
50.70 - }
50.71 - if( page != NULL ) {
50.72 - if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
50.73 - /* Overwriting the entire page anyway */
50.74 - xlat_flush_page_by_lut(page);
50.75 - } else {
50.76 - for( i=entry; i<entry+page_entries; i++ ) {
50.77 - if( page[i] != NULL ) {
50.78 - xlat_flush_page_by_lut(page);
50.79 - break;
50.80 - }
50.81 + do {
50.82 + void **page = xlat_lut[page_no];
50.83 + int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
50.84 + if( entry_count < page_entries ) {
50.85 + page_entries = entry_count;
50.86 + }
50.87 + if( page != NULL ) {
50.88 + if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
50.89 + /* Overwriting the entire page anyway */
50.90 + xlat_flush_page_by_lut(page);
50.91 + } else {
50.92 + for( i=entry; i<entry+page_entries; i++ ) {
50.93 + if( page[i] != NULL ) {
50.94 + xlat_flush_page_by_lut(page);
50.95 + break;
50.96 }
50.97 }
50.98 - entry_count -= page_entries;
50.99 }
50.100 - page_no ++;
50.101 entry_count -= page_entries;
50.102 - entry = 0;
50.103 - } while( entry_count > 0 );
50.104 - }
50.105 + }
50.106 + page_no ++;
50.107 + entry_count -= page_entries;
50.108 + entry = 0;
50.109 + } while( entry_count > 0 );
50.110 }
50.111
50.112 void FASTCALL xlat_flush_page( sh4addr_t address )
50.113 @@ -208,29 +202,6 @@
50.114 return result;
50.115 }
50.116
50.117 -xlat_recovery_record_t xlat_get_post_recovery( void *code, void *native_pc, gboolean with_terminal )
50.118 -{
50.119 - if( code != NULL ) {
50.120 - uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
50.121 - xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
50.122 - uint32_t count = block->recover_table_size;
50.123 - xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
50.124 - uint32_t posn;
50.125 - if( count > 0 && !with_terminal )
50.126 - count--;
50.127 - if( records[count-1].xlat_offset < pc_offset ) {
50.128 - return NULL;
50.129 - }
50.130 - for( posn=count-1; posn > 0; posn-- ) {
50.131 - if( records[posn-1].xlat_offset < pc_offset ) {
50.132 - return &records[posn];
50.133 - }
50.134 - }
50.135 - return &records[0]; // shouldn't happen
50.136 - }
50.137 - return NULL;
50.138 -}
50.139 -
50.140 xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
50.141 {
50.142 if( code != NULL ) {
51.1 --- a/src/sh4/xltcache.h Mon Dec 15 10:44:56 2008 +0000
51.2 +++ b/src/sh4/xltcache.h Tue Jan 13 11:56:28 2009 +0000
51.3 @@ -41,7 +41,7 @@
51.4 int active; /* 0 = deleted, 1 = normal. 2 = accessed (temp-space only) */
51.5 uint32_t size;
51.6 void **lut_entry; /* For deletion */
51.7 - uint32_t fpscr_mask, fpscr; /* fpscr condition check */
51.8 + uint32_t xlat_sh4_mode; /* comparison with sh4r.xlat_sh4_mode */
51.9 uint32_t recover_table_offset; // Offset from code[0] of the recovery table;
51.10 uint32_t recover_table_size;
51.11 unsigned char code[0];
51.12 @@ -51,8 +51,7 @@
51.13
51.14 #define XLAT_BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1)
51.15
51.16 -#define XLAT_BLOCK_FPSCR_MASK(code) (XLAT_BLOCK_FOR_CODE(code)->fpscr_mask)
51.17 -#define XLAT_BLOCK_FPSCR(code) (XLAT_BLOCK_FOR_CODE(code)->fpscr_mask)
51.18 +#define XLAT_BLOCK_MODE(code) (XLAT_BLOCK_FOR_CODE(code)->xlat_sh4_mode)
51.19
51.20 /**
51.21 * Initialize the translation cache
51.22 @@ -102,22 +101,10 @@
51.23 void * FASTCALL xlat_get_code( sh4addr_t address );
51.24
51.25 /**
51.26 - * Retrieve the post-instruction recovery record corresponding to the given
51.27 - * native address, or NULL if there is no recovery code for the address.
51.28 - * @param code The code block containing the recovery table.