Search
lxdream.org :: lxdream :: r939:6f2302afeb89
lxdream 0.9.1
released Jun 29
Download Now
changeset939:6f2302afeb89 lxdream-mem
parent938:e377bd827c54
child940:81e0d3051d5f
authornkeynes
dateSat Jan 03 03:30:26 2009 +0000 (15 years ago)
branchlxdream-mem
MMU work-in-progress
* Move SDRAM out into separate sdram.c
* Move all page-table management into mmu.c
* Convert UTLB management to use the new page-tables
* Rip out all calls to mmu_vma_to_phys_* and replace with direct access
src/Makefile.am
src/Makefile.in
src/lxdream.h
src/mem.c
src/mem.h
src/sdram.c
src/sh4/cache.c
src/sh4/ia32abi.h
src/sh4/ia64abi.h
src/sh4/mmu.c
src/sh4/mmu.h
src/sh4/mmux86.c
src/sh4/sh4.c
src/sh4/sh4core.h
src/sh4/sh4core.in
src/sh4/sh4dasm.in
src/sh4/sh4mem.c
src/sh4/sh4mmio.h
src/sh4/sh4x86.in
src/sh4/x86op.h
src/test/testsh4x86.c
src/x86dasm/x86dasm.c
1.1 --- a/src/Makefile.am Sat Dec 27 04:09:17 2008 +0000
1.2 +++ b/src/Makefile.am Sat Jan 03 03:30:26 2009 +0000
1.3 @@ -34,7 +34,7 @@
1.4
1.5 lxdream_SOURCES = \
1.6 main.c version.c config.c config.h lxdream.h dream.h gui.h cpu.h hook.h \
1.7 - gettext.h mem.c mem.h mmio.h paths.c watch.c \
1.8 + gettext.h mem.c mem.h sdram.c mmio.h paths.c watch.c \
1.9 asic.c asic.h clock.h serial.h \
1.10 syscall.c syscall.h bios.c dcload.c \
1.11 gdrom/ide.c gdrom/ide.h gdrom/packet.h gdrom/gdimage.c \
1.12 @@ -66,7 +66,7 @@
1.13 if BUILD_SH4X86
1.14 lxdream_SOURCES += sh4/sh4x86.c sh4/x86op.h \
1.15 sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
1.16 - sh4/sh4trans.c sh4/sh4trans.h \
1.17 + sh4/sh4trans.c sh4/sh4trans.h sh4/mmux86.c \
1.18 x86dasm/x86dasm.c x86dasm/x86dasm.h \
1.19 x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c \
1.20 x86dasm/ansidecl.h x86dasm/bfd.h x86dasm/dis-asm.h \
1.21 @@ -76,8 +76,8 @@
1.22 test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
1.23 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
1.24 x86dasm/dis-buf.c \
1.25 - sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
1.26 - sh4/xltcache.h mem.c util.c sh4/mmu.c
1.27 + sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
1.28 + sh4/xltcache.h mem.c util.c
1.29
1.30 check_PROGRAMS += test/testsh4x86
1.31 endif
2.1 --- a/src/Makefile.in Sat Dec 27 04:09:17 2008 +0000
2.2 +++ b/src/Makefile.in Sat Jan 03 03:30:26 2009 +0000
2.3 @@ -41,7 +41,7 @@
2.4 check_PROGRAMS = test/testxlt$(EXEEXT) $(am__EXEEXT_1)
2.5 @BUILD_SH4X86_TRUE@am__append_1 = sh4/sh4x86.c sh4/x86op.h \
2.6 @BUILD_SH4X86_TRUE@ sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
2.7 -@BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4trans.h \
2.8 +@BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4trans.h sh4/mmux86.c \
2.9 @BUILD_SH4X86_TRUE@ x86dasm/x86dasm.c x86dasm/x86dasm.h \
2.10 @BUILD_SH4X86_TRUE@ x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c \
2.11 @BUILD_SH4X86_TRUE@ x86dasm/ansidecl.h x86dasm/bfd.h x86dasm/dis-asm.h \
2.12 @@ -94,7 +94,7 @@
2.13 genglsl_DEPENDENCIES = $(am__DEPENDENCIES_1)
2.14 am__lxdream_SOURCES_DIST = main.c version.c config.c config.h \
2.15 lxdream.h dream.h gui.h cpu.h hook.h gettext.h mem.c mem.h \
2.16 - mmio.h paths.c watch.c asic.c asic.h clock.h serial.h \
2.17 + sdram.c mmio.h paths.c watch.c asic.c asic.h clock.h serial.h \
2.18 syscall.c syscall.h bios.c dcload.c gdrom/ide.c gdrom/ide.h \
2.19 gdrom/packet.h gdrom/gdimage.c gdrom/gdrom.c gdrom/gdrom.h \
2.20 gdrom/nrg.c gdrom/cdi.c gdrom/gdi.c gdrom/edc_ecc.c \
2.21 @@ -121,25 +121,27 @@
2.22 drivers/gl_fbo.c sh4/sh4.def sh4/sh4core.in sh4/sh4x86.in \
2.23 sh4/sh4dasm.in sh4/sh4stat.in sh4/sh4x86.c sh4/x86op.h \
2.24 sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h sh4/sh4trans.c \
2.25 - sh4/sh4trans.h x86dasm/x86dasm.c x86dasm/x86dasm.h \
2.26 - x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c \
2.27 - x86dasm/ansidecl.h x86dasm/bfd.h x86dasm/dis-asm.h \
2.28 - x86dasm/symcat.h x86dasm/sysdep.h gtkui/gtkui.c gtkui/gtkui.h \
2.29 - gtkui/gtk_win.c gtkui/gtkcb.c gtkui/gtk_mmio.c \
2.30 - gtkui/gtk_debug.c gtkui/gtk_dump.c gtkui/gtk_ctrl.c \
2.31 - gtkui/gtk_path.c gtkui/gtk_gd.c drivers/video_gtk.c \
2.32 - cocoaui/cocoaui.c cocoaui/cocoaui.h cocoaui/cocoa_win.c \
2.33 - cocoaui/cocoa_gd.c cocoaui/cocoa_prefs.c cocoaui/cocoa_path.c \
2.34 - cocoaui/cocoa_ctrl.c drivers/video_osx.c drivers/mac_keymap.h \
2.35 - drivers/mac_keymap.txt drivers/video_gdk.c drivers/video_glx.c \
2.36 - drivers/video_glx.h drivers/video_nsgl.c drivers/video_nsgl.h \
2.37 - drivers/audio_osx.c drivers/audio_pulse.c drivers/audio_esd.c \
2.38 - drivers/audio_alsa.c drivers/cd_linux.c drivers/cd_osx.c \
2.39 - drivers/osx_iokit.c drivers/osx_iokit.h drivers/cd_none.c \
2.40 - drivers/joy_linux.c drivers/joy_linux.h
2.41 + sh4/sh4trans.h sh4/mmux86.c x86dasm/x86dasm.c \
2.42 + x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
2.43 + x86dasm/dis-buf.c x86dasm/ansidecl.h x86dasm/bfd.h \
2.44 + x86dasm/dis-asm.h x86dasm/symcat.h x86dasm/sysdep.h \
2.45 + gtkui/gtkui.c gtkui/gtkui.h gtkui/gtk_win.c gtkui/gtkcb.c \
2.46 + gtkui/gtk_mmio.c gtkui/gtk_debug.c gtkui/gtk_dump.c \
2.47 + gtkui/gtk_ctrl.c gtkui/gtk_path.c gtkui/gtk_gd.c \
2.48 + drivers/video_gtk.c cocoaui/cocoaui.c cocoaui/cocoaui.h \
2.49 + cocoaui/cocoa_win.c cocoaui/cocoa_gd.c cocoaui/cocoa_prefs.c \
2.50 + cocoaui/cocoa_path.c cocoaui/cocoa_ctrl.c drivers/video_osx.c \
2.51 + drivers/mac_keymap.h drivers/mac_keymap.txt \
2.52 + drivers/video_gdk.c drivers/video_glx.c drivers/video_glx.h \
2.53 + drivers/video_nsgl.c drivers/video_nsgl.h drivers/audio_osx.c \
2.54 + drivers/audio_pulse.c drivers/audio_esd.c drivers/audio_alsa.c \
2.55 + drivers/cd_linux.c drivers/cd_osx.c drivers/osx_iokit.c \
2.56 + drivers/osx_iokit.h drivers/cd_none.c drivers/joy_linux.c \
2.57 + drivers/joy_linux.h
2.58 @BUILD_SH4X86_TRUE@am__objects_1 = sh4x86.$(OBJEXT) sh4trans.$(OBJEXT) \
2.59 -@BUILD_SH4X86_TRUE@ x86dasm.$(OBJEXT) i386-dis.$(OBJEXT) \
2.60 -@BUILD_SH4X86_TRUE@ dis-init.$(OBJEXT) dis-buf.$(OBJEXT)
2.61 +@BUILD_SH4X86_TRUE@ mmux86.$(OBJEXT) x86dasm.$(OBJEXT) \
2.62 +@BUILD_SH4X86_TRUE@ i386-dis.$(OBJEXT) dis-init.$(OBJEXT) \
2.63 +@BUILD_SH4X86_TRUE@ dis-buf.$(OBJEXT)
2.64 @GUI_GTK_TRUE@am__objects_2 = gtkui.$(OBJEXT) gtk_win.$(OBJEXT) \
2.65 @GUI_GTK_TRUE@ gtkcb.$(OBJEXT) gtk_mmio.$(OBJEXT) \
2.66 @GUI_GTK_TRUE@ gtk_debug.$(OBJEXT) gtk_dump.$(OBJEXT) \
2.67 @@ -161,17 +163,17 @@
2.68 @CDROM_NONE_TRUE@am__objects_13 = cd_none.$(OBJEXT)
2.69 @JOY_LINUX_TRUE@am__objects_14 = joy_linux.$(OBJEXT)
2.70 am_lxdream_OBJECTS = main.$(OBJEXT) version.$(OBJEXT) config.$(OBJEXT) \
2.71 - mem.$(OBJEXT) paths.$(OBJEXT) watch.$(OBJEXT) asic.$(OBJEXT) \
2.72 - syscall.$(OBJEXT) bios.$(OBJEXT) dcload.$(OBJEXT) \
2.73 - ide.$(OBJEXT) gdimage.$(OBJEXT) gdrom.$(OBJEXT) nrg.$(OBJEXT) \
2.74 - cdi.$(OBJEXT) gdi.$(OBJEXT) edc_ecc.$(OBJEXT) mmc.$(OBJEXT) \
2.75 - dreamcast.$(OBJEXT) eventq.$(OBJEXT) sh4.$(OBJEXT) \
2.76 - intc.$(OBJEXT) sh4mem.$(OBJEXT) timer.$(OBJEXT) dmac.$(OBJEXT) \
2.77 - mmu.$(OBJEXT) sh4core.$(OBJEXT) sh4dasm.$(OBJEXT) \
2.78 - sh4mmio.$(OBJEXT) scif.$(OBJEXT) sh4stat.$(OBJEXT) \
2.79 - xltcache.$(OBJEXT) pmm.$(OBJEXT) cache.$(OBJEXT) \
2.80 - armcore.$(OBJEXT) armdasm.$(OBJEXT) armmem.$(OBJEXT) \
2.81 - aica.$(OBJEXT) audio.$(OBJEXT) pvr2.$(OBJEXT) \
2.82 + mem.$(OBJEXT) sdram.$(OBJEXT) paths.$(OBJEXT) watch.$(OBJEXT) \
2.83 + asic.$(OBJEXT) syscall.$(OBJEXT) bios.$(OBJEXT) \
2.84 + dcload.$(OBJEXT) ide.$(OBJEXT) gdimage.$(OBJEXT) \
2.85 + gdrom.$(OBJEXT) nrg.$(OBJEXT) cdi.$(OBJEXT) gdi.$(OBJEXT) \
2.86 + edc_ecc.$(OBJEXT) mmc.$(OBJEXT) dreamcast.$(OBJEXT) \
2.87 + eventq.$(OBJEXT) sh4.$(OBJEXT) intc.$(OBJEXT) sh4mem.$(OBJEXT) \
2.88 + timer.$(OBJEXT) dmac.$(OBJEXT) mmu.$(OBJEXT) sh4core.$(OBJEXT) \
2.89 + sh4dasm.$(OBJEXT) sh4mmio.$(OBJEXT) scif.$(OBJEXT) \
2.90 + sh4stat.$(OBJEXT) xltcache.$(OBJEXT) pmm.$(OBJEXT) \
2.91 + cache.$(OBJEXT) armcore.$(OBJEXT) armdasm.$(OBJEXT) \
2.92 + armmem.$(OBJEXT) aica.$(OBJEXT) audio.$(OBJEXT) pvr2.$(OBJEXT) \
2.93 pvr2mem.$(OBJEXT) tacore.$(OBJEXT) rendsort.$(OBJEXT) \
2.94 texcache.$(OBJEXT) yuv.$(OBJEXT) rendsave.$(OBJEXT) \
2.95 scene.$(OBJEXT) gl_sl.$(OBJEXT) gl_slsrc.$(OBJEXT) \
2.96 @@ -189,14 +191,14 @@
2.97 lxdream_DEPENDENCIES = $(am__DEPENDENCIES_1)
2.98 am__test_testsh4x86_SOURCES_DIST = test/testsh4x86.c x86dasm/x86dasm.c \
2.99 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
2.100 - x86dasm/dis-buf.c sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c \
2.101 - sh4/xltcache.c sh4/xltcache.h mem.c util.c sh4/mmu.c
2.102 + x86dasm/dis-buf.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
2.103 + sh4/xltcache.h mem.c util.c
2.104 @BUILD_SH4X86_TRUE@am_test_testsh4x86_OBJECTS = testsh4x86.$(OBJEXT) \
2.105 @BUILD_SH4X86_TRUE@ x86dasm.$(OBJEXT) i386-dis.$(OBJEXT) \
2.106 @BUILD_SH4X86_TRUE@ dis-init.$(OBJEXT) dis-buf.$(OBJEXT) \
2.107 -@BUILD_SH4X86_TRUE@ sh4dasm.$(OBJEXT) sh4trans.$(OBJEXT) \
2.108 -@BUILD_SH4X86_TRUE@ sh4x86.$(OBJEXT) xltcache.$(OBJEXT) \
2.109 -@BUILD_SH4X86_TRUE@ mem.$(OBJEXT) util.$(OBJEXT) mmu.$(OBJEXT)
2.110 +@BUILD_SH4X86_TRUE@ sh4trans.$(OBJEXT) sh4x86.$(OBJEXT) \
2.111 +@BUILD_SH4X86_TRUE@ xltcache.$(OBJEXT) mem.$(OBJEXT) \
2.112 +@BUILD_SH4X86_TRUE@ util.$(OBJEXT)
2.113 test_testsh4x86_OBJECTS = $(am_test_testsh4x86_OBJECTS)
2.114 test_testsh4x86_DEPENDENCIES =
2.115 am__dirstamp = $(am__leading_dot)dirstamp
2.116 @@ -401,43 +403,43 @@
2.117 gendec_SOURCES = tools/gendec.c tools/gendec.h tools/insparse.c tools/actparse.c
2.118 genglsl_SOURCES = tools/genglsl.c
2.119 lxdream_SOURCES = main.c version.c config.c config.h lxdream.h dream.h \
2.120 - gui.h cpu.h hook.h gettext.h mem.c mem.h mmio.h paths.c \
2.121 - watch.c asic.c asic.h clock.h serial.h syscall.c syscall.h \
2.122 - bios.c dcload.c gdrom/ide.c gdrom/ide.h gdrom/packet.h \
2.123 - gdrom/gdimage.c gdrom/gdrom.c gdrom/gdrom.h gdrom/nrg.c \
2.124 - gdrom/cdi.c gdrom/gdi.c gdrom/edc_ecc.c gdrom/ecc.h \
2.125 - gdrom/edc_crctable.h gdrom/edc_encoder.h gdrom/edc_l2sq.h \
2.126 - gdrom/edc_scramble.h gdrom/mmc.c gdrom/gddriver.h dreamcast.c \
2.127 - dreamcast.h eventq.c eventq.h sh4/sh4.c sh4/intc.c sh4/intc.h \
2.128 - sh4/sh4mem.c sh4/timer.c sh4/dmac.c sh4/mmu.c sh4/sh4core.c \
2.129 - sh4/sh4core.h sh4/sh4dasm.c sh4/sh4dasm.h sh4/sh4mmio.c \
2.130 - sh4/sh4mmio.h sh4/scif.c sh4/sh4stat.c sh4/sh4stat.h \
2.131 - sh4/xltcache.c sh4/xltcache.h sh4/sh4.h sh4/dmac.h sh4/pmm.c \
2.132 - sh4/cache.c sh4/mmu.h aica/armcore.c aica/armcore.h \
2.133 - aica/armdasm.c aica/armdasm.h aica/armmem.c aica/aica.c \
2.134 - aica/aica.h aica/audio.c aica/audio.h pvr2/pvr2.c pvr2/pvr2.h \
2.135 - pvr2/pvr2mem.c pvr2/pvr2mmio.h pvr2/tacore.c pvr2/rendsort.c \
2.136 - pvr2/texcache.c pvr2/yuv.c pvr2/rendsave.c pvr2/scene.c \
2.137 - pvr2/scene.h pvr2/gl_sl.c pvr2/gl_slsrc.c pvr2/glutil.c \
2.138 - pvr2/glutil.h pvr2/glrender.c pvr2/vertex.glsl \
2.139 - pvr2/fragment.glsl maple/maple.c maple/maple.h \
2.140 - maple/controller.c maple/kbd.c maple/mouse.c maple/lightgun.c \
2.141 - loader.c loader.h elf.h bootstrap.c bootstrap.h util.c \
2.142 - gdlist.c gdlist.h display.c display.h dckeysyms.h \
2.143 - drivers/audio_null.c drivers/video_null.c drivers/video_gl.c \
2.144 - drivers/video_gl.h drivers/gl_fbo.c sh4/sh4.def sh4/sh4core.in \
2.145 - sh4/sh4x86.in sh4/sh4dasm.in sh4/sh4stat.in $(am__append_1) \
2.146 - $(am__append_3) $(am__append_4) $(am__append_5) \
2.147 - $(am__append_6) $(am__append_7) $(am__append_8) \
2.148 - $(am__append_9) $(am__append_10) $(am__append_11) \
2.149 - $(am__append_12) $(am__append_13) $(am__append_14) \
2.150 - $(am__append_15)
2.151 + gui.h cpu.h hook.h gettext.h mem.c mem.h sdram.c mmio.h \
2.152 + paths.c watch.c asic.c asic.h clock.h serial.h syscall.c \
2.153 + syscall.h bios.c dcload.c gdrom/ide.c gdrom/ide.h \
2.154 + gdrom/packet.h gdrom/gdimage.c gdrom/gdrom.c gdrom/gdrom.h \
2.155 + gdrom/nrg.c gdrom/cdi.c gdrom/gdi.c gdrom/edc_ecc.c \
2.156 + gdrom/ecc.h gdrom/edc_crctable.h gdrom/edc_encoder.h \
2.157 + gdrom/edc_l2sq.h gdrom/edc_scramble.h gdrom/mmc.c \
2.158 + gdrom/gddriver.h dreamcast.c dreamcast.h eventq.c eventq.h \
2.159 + sh4/sh4.c sh4/intc.c sh4/intc.h sh4/sh4mem.c sh4/timer.c \
2.160 + sh4/dmac.c sh4/mmu.c sh4/sh4core.c sh4/sh4core.h sh4/sh4dasm.c \
2.161 + sh4/sh4dasm.h sh4/sh4mmio.c sh4/sh4mmio.h sh4/scif.c \
2.162 + sh4/sh4stat.c sh4/sh4stat.h sh4/xltcache.c sh4/xltcache.h \
2.163 + sh4/sh4.h sh4/dmac.h sh4/pmm.c sh4/cache.c sh4/mmu.h \
2.164 + aica/armcore.c aica/armcore.h aica/armdasm.c aica/armdasm.h \
2.165 + aica/armmem.c aica/aica.c aica/aica.h aica/audio.c \
2.166 + aica/audio.h pvr2/pvr2.c pvr2/pvr2.h pvr2/pvr2mem.c \
2.167 + pvr2/pvr2mmio.h pvr2/tacore.c pvr2/rendsort.c pvr2/texcache.c \
2.168 + pvr2/yuv.c pvr2/rendsave.c pvr2/scene.c pvr2/scene.h \
2.169 + pvr2/gl_sl.c pvr2/gl_slsrc.c pvr2/glutil.c pvr2/glutil.h \
2.170 + pvr2/glrender.c pvr2/vertex.glsl pvr2/fragment.glsl \
2.171 + maple/maple.c maple/maple.h maple/controller.c maple/kbd.c \
2.172 + maple/mouse.c maple/lightgun.c loader.c loader.h elf.h \
2.173 + bootstrap.c bootstrap.h util.c gdlist.c gdlist.h display.c \
2.174 + display.h dckeysyms.h drivers/audio_null.c \
2.175 + drivers/video_null.c drivers/video_gl.c drivers/video_gl.h \
2.176 + drivers/gl_fbo.c sh4/sh4.def sh4/sh4core.in sh4/sh4x86.in \
2.177 + sh4/sh4dasm.in sh4/sh4stat.in $(am__append_1) $(am__append_3) \
2.178 + $(am__append_4) $(am__append_5) $(am__append_6) \
2.179 + $(am__append_7) $(am__append_8) $(am__append_9) \
2.180 + $(am__append_10) $(am__append_11) $(am__append_12) \
2.181 + $(am__append_13) $(am__append_14) $(am__append_15)
2.182 @BUILD_SH4X86_TRUE@test_testsh4x86_LDADD = @GLIB_LIBS@ @GTK_LIBS@ @LIBPNG_LIBS@
2.183 @BUILD_SH4X86_TRUE@test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
2.184 @BUILD_SH4X86_TRUE@ x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
2.185 @BUILD_SH4X86_TRUE@ x86dasm/dis-buf.c \
2.186 -@BUILD_SH4X86_TRUE@ sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
2.187 -@BUILD_SH4X86_TRUE@ sh4/xltcache.h mem.c util.c sh4/mmu.c
2.188 +@BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
2.189 +@BUILD_SH4X86_TRUE@ sh4/xltcache.h mem.c util.c
2.190
2.191 lxdream_LDADD = @GLIB_LIBS@ @GTK_LIBS@ @LIBPNG_LIBS@ @PULSE_LIBS@ @ESOUND_LIBS@ @ALSA_LIBS@ $(INTLLIBS)
2.192 gendec_LDADD = @GLIB_LIBS@ @GTK_LIBS@ $(INTLLIBS)
2.193 @@ -599,6 +601,7 @@
2.194 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mem.Po@am__quote@
2.195 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mmc.Po@am__quote@
2.196 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mmu.Po@am__quote@
2.197 +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mmux86.Po@am__quote@
2.198 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mouse.Po@am__quote@
2.199 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nrg.Po@am__quote@
2.200 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/osx_iokit.Po@am__quote@
2.201 @@ -610,6 +613,7 @@
2.202 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rendsort.Po@am__quote@
2.203 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scene.Po@am__quote@
2.204 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scif.Po@am__quote@
2.205 +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sdram.Po@am__quote@
2.206 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh4.Po@am__quote@
2.207 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh4core.Po@am__quote@
2.208 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh4dasm.Po@am__quote@
2.209 @@ -1408,6 +1412,20 @@
2.210 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
2.211 @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o sh4trans.obj `if test -f 'sh4/sh4trans.c'; then $(CYGPATH_W) 'sh4/sh4trans.c'; else $(CYGPATH_W) '$(srcdir)/sh4/sh4trans.c'; fi`
2.212
2.213 +mmux86.o: sh4/mmux86.c
2.214 +@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mmux86.o -MD -MP -MF "$(DEPDIR)/mmux86.Tpo" -c -o mmux86.o `test -f 'sh4/mmux86.c' || echo '$(srcdir)/'`sh4/mmux86.c; \
2.215 +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/mmux86.Tpo" "$(DEPDIR)/mmux86.Po"; else rm -f "$(DEPDIR)/mmux86.Tpo"; exit 1; fi
2.216 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sh4/mmux86.c' object='mmux86.o' libtool=no @AMDEPBACKSLASH@
2.217 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
2.218 +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mmux86.o `test -f 'sh4/mmux86.c' || echo '$(srcdir)/'`sh4/mmux86.c
2.219 +
2.220 +mmux86.obj: sh4/mmux86.c
2.221 +@am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mmux86.obj -MD -MP -MF "$(DEPDIR)/mmux86.Tpo" -c -o mmux86.obj `if test -f 'sh4/mmux86.c'; then $(CYGPATH_W) 'sh4/mmux86.c'; else $(CYGPATH_W) '$(srcdir)/sh4/mmux86.c'; fi`; \
2.222 +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/mmux86.Tpo" "$(DEPDIR)/mmux86.Po"; else rm -f "$(DEPDIR)/mmux86.Tpo"; exit 1; fi
2.223 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='sh4/mmux86.c' object='mmux86.obj' libtool=no @AMDEPBACKSLASH@
2.224 +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
2.225 +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mmux86.obj `if test -f 'sh4/mmux86.c'; then $(CYGPATH_W) 'sh4/mmux86.c'; else $(CYGPATH_W) '$(srcdir)/sh4/mmux86.c'; fi`
2.226 +
2.227 x86dasm.o: x86dasm/x86dasm.c
2.228 @am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT x86dasm.o -MD -MP -MF "$(DEPDIR)/x86dasm.Tpo" -c -o x86dasm.o `test -f 'x86dasm/x86dasm.c' || echo '$(srcdir)/'`x86dasm/x86dasm.c; \
2.229 @am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/x86dasm.Tpo" "$(DEPDIR)/x86dasm.Po"; else rm -f "$(DEPDIR)/x86dasm.Tpo"; exit 1; fi
3.1 --- a/src/lxdream.h Sat Dec 27 04:09:17 2008 +0000
3.2 +++ b/src/lxdream.h Sat Jan 03 03:30:26 2009 +0000
3.3 @@ -92,7 +92,7 @@
3.4 const char *get_locale_path();
3.5
3.6 #ifdef HAVE_FASTCALL
3.7 -#define FASTCALL __attribute__((regparm(3)))
3.8 +#define FASTCALL __attribute__((regparm(2)))
3.9 #else
3.10 #define FASTCALL
3.11 #endif
4.1 --- a/src/mem.c Sat Dec 27 04:09:17 2008 +0000
4.2 +++ b/src/mem.c Sat Jan 03 03:30:26 2009 +0000
4.3 @@ -35,6 +35,10 @@
4.4 #include "mmio.h"
4.5 #include "dreamcast.h"
4.6
4.7 +#ifndef PAGE_SIZE
4.8 +#define PAGE_SIZE 4096
4.9 +#endif
4.10 +
4.11 sh4ptr_t *page_map = NULL;
4.12 mem_region_fn_t *ext_address_space = NULL;
4.13
4.14 @@ -91,6 +95,18 @@
4.15 return mem;
4.16 }
4.17
4.18 +void mem_unprotect( void *region, uint32_t size )
4.19 +{
4.20 + /* Force page alignment */
4.21 + uintptr_t i = (uintptr_t)region;
4.22 + uintptr_t mask = ~(PAGE_SIZE-1);
4.23 + void *ptr = (void *)(i & mask);
4.24 + size_t len = i & (PAGE_SIZE-1) + size;
4.25 + len = (len + (PAGE_SIZE-1)) & mask;
4.26 +
4.27 + int status = mprotect( ptr, len, PROT_READ|PROT_WRITE|PROT_EXEC );
4.28 + assert( status == 0 );
4.29 +}
4.30
4.31 void mem_init( void )
4.32 {
5.1 --- a/src/mem.h Sat Dec 27 04:09:17 2008 +0000
5.2 +++ b/src/mem.h Sat Jan 03 03:30:26 2009 +0000
5.3 @@ -28,6 +28,17 @@
5.4 extern "C" {
5.5 #endif
5.6
5.7 +
5.8 +typedef FASTCALL int32_t (*mem_read_fn_t)(sh4addr_t);
5.9 +typedef FASTCALL void (*mem_write_fn_t)(sh4addr_t, uint32_t);
5.10 +typedef FASTCALL void (*mem_read_burst_fn_t)(unsigned char *,sh4addr_t);
5.11 +typedef FASTCALL void (*mem_write_burst_fn_t)(sh4addr_t,unsigned char *);
5.12 +
5.13 +typedef FASTCALL int32_t (*mem_read_exc_fn_t)(sh4addr_t, void *);
5.14 +typedef FASTCALL void (*mem_write_exc_fn_t)(sh4addr_t, uint32_t, void *);
5.15 +typedef FASTCALL void (*mem_read_burst_exc_fn_t)(unsigned char *,sh4addr_t, void *);
5.16 +typedef FASTCALL void (*mem_write_burst_exc_fn_t)(sh4addr_t,unsigned char *, void *);
5.17 +
5.18 /**
5.19 * Basic memory region vtable - read/write at byte, word, long, and burst
5.20 * (32-byte) sizes.
5.21 @@ -141,6 +152,11 @@
5.22 #define SIGNEXT48(n) ((((int64_t)(n))<<16)>>16)
5.23 #define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
5.24
5.25 +/* Ensure the given region allows all of read/write/execute. If not
5.26 + * page-aligned, some surrounding regions will similarly be unprotected.
5.27 + */
5.28 +void mem_unprotect( void *ptr, uint32_t size );
5.29 +
5.30 #ifdef __cplusplus
5.31 }
5.32 #endif
6.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
6.2 +++ b/src/sdram.c Sat Jan 03 03:30:26 2009 +0000
6.3 @@ -0,0 +1,65 @@
6.4 +/**
6.5 + * $Id: sdram.c 954 2008-12-26 14:25:23Z nkeynes $
6.6 + *
6.7 + * Dreamcast main SDRAM - access methods and timing controls. This is fairly
6.8 + * directly coupled to the SH4
6.9 + *
6.10 + * Copyright (c) 2005 Nathan Keynes.
6.11 + *
6.12 + * This program is free software; you can redistribute it and/or modify
6.13 + * it under the terms of the GNU General Public License as published by
6.14 + * the Free Software Foundation; either version 2 of the License, or
6.15 + * (at your option) any later version.
6.16 + *
6.17 + * This program is distributed in the hope that it will be useful,
6.18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6.19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6.20 + * GNU General Public License for more details.
6.21 + */
6.22 +
6.23 +#include "lxdream.h"
6.24 +#include "mem.h"
6.25 +#include "dreamcast.h"
6.26 +#include <string.h>
6.27 +
6.28 +
6.29 +static int32_t FASTCALL ext_sdram_read_long( sh4addr_t addr )
6.30 +{
6.31 + return *((int32_t *)(dc_main_ram + (addr&0x00FFFFFF)));
6.32 +}
6.33 +static int32_t FASTCALL ext_sdram_read_word( sh4addr_t addr )
6.34 +{
6.35 + return SIGNEXT16(*((int16_t *)(dc_main_ram + (addr&0x00FFFFFF))));
6.36 +}
6.37 +static int32_t FASTCALL ext_sdram_read_byte( sh4addr_t addr )
6.38 +{
6.39 + return SIGNEXT8(*((int16_t *)(dc_main_ram + (addr&0x00FFFFFF))));
6.40 +}
6.41 +static void FASTCALL ext_sdram_write_long( sh4addr_t addr, uint32_t val )
6.42 +{
6.43 + *(uint32_t *)(dc_main_ram + (addr&0x00FFFFFF)) = val;
6.44 + xlat_invalidate_long(addr);
6.45 +}
6.46 +static void FASTCALL ext_sdram_write_word( sh4addr_t addr, uint32_t val )
6.47 +{
6.48 + *(uint16_t *)(dc_main_ram + (addr&0x00FFFFFF)) = (uint16_t)val;
6.49 + xlat_invalidate_word(addr);
6.50 +}
6.51 +static void FASTCALL ext_sdram_write_byte( sh4addr_t addr, uint32_t val )
6.52 +{
6.53 + *(uint8_t *)(dc_main_ram + (addr&0x00FFFFFF)) = (uint8_t)val;
6.54 + xlat_invalidate_word(addr);
6.55 +}
6.56 +static void FASTCALL ext_sdram_read_burst( unsigned char *dest, sh4addr_t addr )
6.57 +{
6.58 + memcpy( dest, dc_main_ram+(addr&0x00FFFFFF), 32 );
6.59 +}
6.60 +static void FASTCALL ext_sdram_write_burst( sh4addr_t addr, unsigned char *src )
6.61 +{
6.62 + memcpy( dc_main_ram+(addr&0x00FFFFFF), src, 32 );
6.63 +}
6.64 +
/* Region vtable for main SDRAM. Entries are (read,write) pairs in the
 * order long, word, byte, burst, matching the accessors above —
 * field names are declared in mem.h (not visible here); TODO confirm
 * the initializer order against struct mem_region_fn. */
struct mem_region_fn mem_region_sdram = { ext_sdram_read_long, ext_sdram_write_long,
        ext_sdram_read_word, ext_sdram_write_word,
        ext_sdram_read_byte, ext_sdram_write_byte,
        ext_sdram_read_burst, ext_sdram_write_burst };
7.1 --- a/src/sh4/cache.c Sat Dec 27 04:09:17 2008 +0000
7.2 +++ b/src/sh4/cache.c Sat Jan 03 03:30:26 2009 +0000
7.3 @@ -1,6 +1,6 @@
7.4 /**
7.5 * $Id$
7.6 - * Implements the on-chip operand cache and instruction caches
7.7 + * Implements the on-chip operand cache, instruction cache, and store queue.
7.8 *
7.9 * Copyright (c) 2008 Nathan Keynes.
7.10 *
7.11 @@ -295,4 +295,21 @@
7.12 sh4_address_space[i] = &mem_region_unmapped;
7.13 break;
7.14 }
7.15 -}
7.16 \ No newline at end of file
7.17 +}
7.18 +
7.19 +
7.20 +/***** Store-queue (considered part of the cache by the SH7750 manual) ******/
7.21 +static void FASTCALL p4_storequeue_write_long( sh4addr_t addr, uint32_t val )
7.22 +{
7.23 + sh4r.store_queue[(addr>>2)&0xF] = val;
7.24 +}
7.25 +static int32_t FASTCALL p4_storequeue_read_long( sh4addr_t addr )
7.26 +{
7.27 + return sh4r.store_queue[(addr>>2)&0xF];
7.28 +}
7.29 +
/* Store-queue region vtable: every access size is routed through the
 * same long-sized accessors (the queue is an array of 32-bit slots),
 * and burst access is unmapped. */
struct mem_region_fn p4_region_storequeue = {
        p4_storequeue_read_long, p4_storequeue_write_long,
        p4_storequeue_read_long, p4_storequeue_write_long,
        p4_storequeue_read_long, p4_storequeue_write_long,
        unmapped_read_burst, unmapped_write_burst }; // No burst access.
8.1 --- a/src/sh4/ia32abi.h Sat Dec 27 04:09:17 2008 +0000
8.2 +++ b/src/sh4/ia32abi.h Sat Jan 03 03:30:26 2009 +0000
8.3 @@ -65,6 +65,15 @@
8.4 CALL_r32disp8(preg, disp8);
8.5 }
8.6
/* Emit a one-argument call through a vtable slot (*(preg + disp8)),
 * additionally loading the exception-backpatch address into EDX so the
 * callee can unwind into the translated block on an MMU exception.
 * With FASTCALL == regparm(2), EAX/EDX carry the two arguments.
 * NOTE(review): the 'pc' parameter is currently unused here — confirm
 * whether load_exc_backpatch() consumes it implicitly. */
static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );   /* first arg -> EAX */
    }
    load_exc_backpatch(R_EDX);        /* exception return point -> EDX */
    CALL_r32disp8(preg, disp8);
}
8.15 +
8.16 static inline void call_func2( void *ptr, int arg1, int arg2 )
8.17 {
8.18 if( arg2 != R_EDX ) {
8.19 @@ -98,6 +107,18 @@
8.20 CALL_r32disp8(preg, disp8);
8.21 }
8.22
/* Emit a two-argument call through a vtable slot (*(preg + disp8)),
 * passing the exception-backpatch address as a third argument via the
 * stack (EAX/EDX are consumed by the two regparm(2) arguments).
 * NOTE(review): if arg1 == R_EDX, the arg2 move below clobbers it
 * before it is copied to EAX — confirm callers never pass EDX as arg1.
 * NOTE(review): 'pc' is unused here; see call_func1_r32disp8_exc. */
static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );   /* second arg -> EDX */
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );   /* first arg -> EAX */
    }
    MOV_backpatch_esp8( 0 );          /* third arg (backpatch addr) on the stack */
    CALL_r32disp8(preg, disp8);
}
8.34 +
8.35
8.36
8.37 static inline void call_func1_exc( void *ptr, int arg1, int pc )
8.38 @@ -121,35 +142,6 @@
8.39 CALL_ptr(ptr);
8.40 }
8.41
8.42 -/**
8.43 - * Write a double (64-bit) value into memory, with the first word in arg2a, and
8.44 - * the second in arg2b
8.45 - */
8.46 -static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
8.47 -{
8.48 - MOV_r32_esp8(addr, 0);
8.49 - MOV_r32_esp8(arg2b, 4);
8.50 - MEM_WRITE_LONG(addr, arg2a);
8.51 - MOV_esp8_r32(0, R_EAX);
8.52 - MOV_esp8_r32(4, R_EDX);
8.53 - ADD_imm8s_r32(4, R_EAX);
8.54 - MEM_WRITE_LONG(R_EAX, R_EDX);
8.55 -}
8.56 -
8.57 -/**
8.58 - * Read a double (64-bit) value from memory, writing the first word into arg2a
8.59 - * and the second into arg2b. The addr must not be in EAX
8.60 - */
8.61 -static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
8.62 -{
8.63 - MOV_r32_esp8(addr, 0);
8.64 - MEM_READ_LONG(addr, R_EAX);
8.65 - MOV_r32_esp8(R_EAX, 4);
8.66 - MOV_esp8_r32(0, R_EAX);
8.67 - ADD_imm8s_r32(4, R_EAX);
8.68 - MEM_READ_LONG(R_EAX, arg2b );
8.69 - MOV_esp8_r32(4, arg2a);
8.70 -}
8.71 #else
8.72 static inline void call_func1( void *ptr, int arg1 )
8.73 {
8.74 @@ -168,44 +160,6 @@
8.75 ADD_imm8s_r32( 16, R_ESP );
8.76 }
8.77
8.78 -/**
8.79 - * Write a double (64-bit) value into memory, with the first word in arg2a, and
8.80 - * the second in arg2b
8.81 - */
8.82 -static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
8.83 -{
8.84 - SUB_imm8s_r32( 8, R_ESP );
8.85 - PUSH_r32(arg2b);
8.86 - LEA_r32disp8_r32( addr, 4, arg2b );
8.87 - PUSH_r32(arg2b);
8.88 - SUB_imm8s_r32( 8, R_ESP );
8.89 - PUSH_r32(arg2a);
8.90 - PUSH_r32(addr);
8.91 - CALL_ptr(sh4_write_long);
8.92 - ADD_imm8s_r32( 16, R_ESP );
8.93 - CALL_ptr(sh4_write_long);
8.94 - ADD_imm8s_r32( 16, R_ESP );
8.95 -}
8.96 -
8.97 -/**
8.98 - * Read a double (64-bit) value from memory, writing the first word into arg2a
8.99 - * and the second into arg2b. The addr must not be in EAX
8.100 - */
8.101 -static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
8.102 -{
8.103 - SUB_imm8s_r32( 12, R_ESP );
8.104 - PUSH_r32(addr);
8.105 - CALL_ptr(sh4_read_long);
8.106 - MOV_r32_esp8(R_EAX, 4);
8.107 - ADD_imm8s_esp8(4, 0);
8.108 - CALL_ptr(sh4_read_long);
8.109 - if( arg2b != R_EAX ) {
8.110 - MOV_r32_r32( R_EAX, arg2b );
8.111 - }
8.112 - MOV_esp8_r32( 4, arg2a );
8.113 - ADD_imm8s_r32( 16, R_ESP );
8.114 -}
8.115 -
8.116 #endif
8.117
8.118 /**
9.1 --- a/src/sh4/ia64abi.h Sat Dec 27 04:09:17 2008 +0000
9.2 +++ b/src/sh4/ia64abi.h Sat Jan 03 03:30:26 2009 +0000
9.3 @@ -64,6 +64,13 @@
9.4 CALL_r32disp8(preg, disp8);
9.5 }
9.6
/* AMD64 variant: emit a one-argument call through a vtable slot,
 * loading the exception-backpatch address as the second argument.
 * SysV AMD64 passes the first two integer args in RDI/RSI; REXW()
 * widens the register move to 64 bits.
 * NOTE(review): 'pc' is unused here — confirm intent. */
static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI); /* first arg -> RDI */
    load_exc_backpatch(R_ESI);        /* exception return point -> RSI */
    CALL_r32disp8(preg, disp8);
}
9.13 +
9.14 #define CALL_FUNC2_SIZE 16
9.15 static inline void call_func2( void *ptr, int arg1, int arg2 )
9.16 {
9.17 @@ -79,41 +86,14 @@
9.18 CALL_r32disp8(preg, disp8);
9.19 }
9.20
9.21 -
9.22 -#define MEM_WRITE_DOUBLE_SIZE 35
9.23 -/**
9.24 - * Write a double (64-bit) value into memory, with the first word in arg2a, and
9.25 - * the second in arg2b
9.26 - */
9.27 -static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
9.28 +static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
9.29 {
9.30 - PUSH_r32(arg2b);
9.31 - PUSH_r32(addr);
9.32 - call_func2(sh4_write_long, addr, arg2a);
9.33 - POP_r32(R_EDI);
9.34 - POP_r32(R_ESI);
9.35 - ADD_imm8s_r32(4, R_EDI);
9.36 - call_func0(sh4_write_long);
9.37 + REXW(); MOV_r32_r32(arg1, R_EDI);
9.38 + REXW(); MOV_r32_r32(arg2, R_ESI);
9.39 + load_exc_backpatch(R_EDX);
9.40 + CALL_r32disp8(preg, disp8);
9.41 }
9.42
9.43 -#define MEM_READ_DOUBLE_SIZE 43
9.44 -/**
9.45 - * Read a double (64-bit) value from memory, writing the first word into arg2a
9.46 - * and the second into arg2b. The addr must not be in EAX
9.47 - */
9.48 -static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
9.49 -{
9.50 - REXW(); SUB_imm8s_r32( 8, R_ESP );
9.51 - PUSH_r32(addr);
9.52 - call_func1(sh4_read_long, addr);
9.53 - POP_r32(R_EDI);
9.54 - PUSH_r32(R_EAX);
9.55 - ADD_imm8s_r32(4, R_EDI);
9.56 - call_func0(sh4_read_long);
9.57 - MOV_r32_r32(R_EAX, arg2b);
9.58 - POP_r32(arg2a);
9.59 - REXW(); ADD_imm8s_r32( 8, R_ESP );
9.60 -}
9.61
9.62
9.63 /**
10.1 --- a/src/sh4/mmu.c Sat Dec 27 04:09:17 2008 +0000
10.2 +++ b/src/sh4/mmu.c Sat Jan 03 03:30:26 2009 +0000
10.3 @@ -1,7 +1,8 @@
10.4 /**
10.5 * $Id$
10.6 *
10.7 - * MMU implementation
10.8 + * SH4 MMU implementation based on address space page maps. This module
10.9 + * is responsible for all address decoding functions.
10.10 *
10.11 * Copyright (c) 2005 Nathan Keynes.
10.12 *
10.13 @@ -26,169 +27,129 @@
10.14 #include "mem.h"
10.15 #include "mmu.h"
10.16
10.17 -#ifdef HAVE_FRAME_ADDRESS
10.18 -#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
10.19 -#else
10.20 -#define RETURN_VIA(exc) return MMU_VMA_ERROR
10.21 -#endif
10.22 -
10.23 -/* The MMU (practically unique in the system) is allowed to raise exceptions
10.24 - * directly, with a return code indicating that one was raised and the caller
10.25 - * had better behave appropriately.
10.26 - */
/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly: it records the faulting virtual address in TEA and PTEH before
 * raising the exception on the SH4 core.
 *
 * Each macro is wrapped in do { } while(0) so that it expands to exactly one
 * statement and remains correct inside an unbraced if/else body; previously
 * only the first statement would have been guarded. Macro arguments used in
 * expressions are parenthesized against operator-precedence surprises.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    do { \
        MMIO_WRITE(MMU, TEA, vpn); \
        MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
        sh4_raise_tlb_exception(code); \
    } while(0)

#define RAISE_MEM_ERROR(code, vpn) \
    do { \
        MMIO_WRITE(MMU, TEA, vpn); \
        MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
        sh4_raise_exception(code); \
    } while(0)

/* Multi-hit raises a reset-class exception first, then records the address */
#define RAISE_TLB_MULTIHIT_ERROR(vpn) \
    do { \
        sh4_raise_reset(EXC_TLB_MULTI_HIT); \
        MMIO_WRITE(MMU, TEA, vpn); \
        MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | ((vpn)&0xFFFFFC00))); \
    } while(0)

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
10.59
10.60 -#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
10.61 -#define OCRAM_END (0x20000000>>LXDREAM_PAGE_BITS)
/* Primary address space (used directly by SH4 cores): one region-function
 * pointer per native (4K) page over the full 32-bit address range. */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;

/* MMU-mapped storequeue targets. Only used with TLB on */
mem_region_fn_t *storequeue_address_space;
mem_region_fn_t *storequeue_user_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;    // UTLB replace counter (incremented externally; folded into range by mmu_urc_fixup)
uint32_t mmu_urb;    // UTLB replace boundary (0 is stored as 0x40 by the MMUCR write handler)

/* Module globals */
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];  // per-entry page vtables (priv + user)
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

/* Structures for 1K page handling: pool of redirection entries allocated
 * on demand (see mmu_utlb_1k_alloc/free). */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;


/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );

/* TLB write-fault handlers installed into page vtables by mmu_utlb_insert_entry */
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );
10.127 -MMIO_REGION_READ_FN( MMU, reg )
10.128 -{
10.129 - reg &= 0xFFF;
10.130 - switch( reg ) {
10.131 - case MMUCR:
10.132 - return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
10.133 - default:
10.134 - return MMIO_READ( MMU, reg );
10.135 - }
10.136 -}
10.137
10.138 -MMIO_REGION_WRITE_FN( MMU, reg, val )
10.139 -{
10.140 - uint32_t tmp;
10.141 - reg &= 0xFFF;
10.142 - switch(reg) {
10.143 - case SH4VER:
10.144 - return;
10.145 - case PTEH:
10.146 - val &= 0xFFFFFCFF;
10.147 - if( (val & 0xFF) != mmu_asid ) {
10.148 - mmu_asid = val&0xFF;
10.149 - sh4_icache.page_vma = -1; // invalidate icache as asid has changed
10.150 - }
10.151 - break;
10.152 - case PTEL:
10.153 - val &= 0x1FFFFDFF;
10.154 - break;
10.155 - case PTEA:
10.156 - val &= 0x0000000F;
10.157 - break;
10.158 - case TRA:
10.159 - val &= 0x000003FC;
10.160 - break;
10.161 - case EXPEVT:
10.162 - case INTEVT:
10.163 - val &= 0x00000FFF;
10.164 - break;
10.165 - case MMUCR:
10.166 - if( val & MMUCR_TI ) {
10.167 - mmu_invalidate_tlb();
10.168 - }
10.169 - mmu_urc = (val >> 10) & 0x3F;
10.170 - mmu_urb = (val >> 18) & 0x3F;
10.171 - mmu_lrui = (val >> 26) & 0x3F;
10.172 - val &= 0x00000301;
10.173 - tmp = MMIO_READ( MMU, MMUCR );
10.174 - if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
10.175 - // AT flag has changed state - flush the xlt cache as all bets
10.176 - // are off now. We also need to force an immediate exit from the
10.177 - // current block
10.178 - MMIO_WRITE( MMU, MMUCR, val );
10.179 - sh4_flush_icache();
10.180 - }
10.181 - break;
10.182 - case CCR:
10.183 - CCN_set_cache_control( val );
10.184 - val &= 0x81A7;
10.185 - break;
10.186 - case MMUUNK1:
10.187 - /* Note that if the high bit is set, this appears to reset the machine.
10.188 - * Not emulating this behaviour yet until we know why...
10.189 - */
10.190 - val &= 0x00010007;
10.191 - break;
10.192 - case QACR0:
10.193 - case QACR1:
10.194 - val &= 0x0000001C;
10.195 - break;
10.196 - case PMCR1:
10.197 - PMM_write_control(0, val);
10.198 - val &= 0x0000C13F;
10.199 - break;
10.200 - case PMCR2:
10.201 - PMM_write_control(1, val);
10.202 - val &= 0x0000C13F;
10.203 - break;
10.204 - default:
10.205 - break;
10.206 - }
10.207 - MMIO_WRITE( MMU, reg, val );
10.208 -}
10.209 +/*********************** Module public functions ****************************/
10.210
10.211 -
/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */
void MMU_init()
{
    /* NOTE(review): mem_alloc_pages' argument unit (bytes vs pages) isn't
     * visible from this file - the "* 256" / "* 4" scaling is assumed to be
     * sized for the full-address-space and 64MB storequeue tables; confirm
     * against mem.c. */
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
    storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );

    mmu_set_tlb_enabled(0);
    /* User mode: everything from 0x80000000 up faults (the 0x00000000 end
     * address wraps, covering the top half of the space) except the
     * storequeue window. */
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue);

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    /* Track external page remaps so the flat (TLB-off) maps stay in sync */
    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    mmu_utlb_1k_init();

    /* Ensure the code regions are executable */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
}
10.263
/**
 * Reset the MMU to its power-on state by pushing zero through the normal
 * CCR/MMUCR write handlers, so all derived state (URC/URB, TLB-off address
 * maps, storequeue protection) is refreshed as a side effect.
 */
void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}
10.270
10.271 void MMU_save_state( FILE *f )
10.272 @@ -221,133 +182,20 @@
10.273 if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
10.274 return 1;
10.275 }
10.276 - mmu_utlb_sorted_reload();
10.277 +
10.278 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
10.279 + mmu_set_tlb_enabled(mmucr&MMUCR_AT);
10.280 + mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
10.281 return 0;
10.282 }
10.283
10.284 -
10.285 -/******************* Sorted TLB data structure ****************/
10.286 -/*
10.287 - * mmu_utlb_sorted maintains a list of all active (valid) entries,
10.288 - * sorted by masked VPN and then ASID. Multi-hit entries are resolved
10.289 - * ahead of time, and have -1 recorded as the corresponding PPN.
10.290 - *
10.291 - * FIXME: Multi-hit detection doesn't pick up cases where two pages
10.292 - * overlap due to different sizes (and don't share the same base
10.293 - * address).
10.294 - */
10.295 -static void mmu_utlb_sorted_reset()
10.296 -{
10.297 - mmu_utlb_entries = 0;
10.298 -}
10.299 -
10.300 -/**
10.301 - * Find an entry in the sorted table (VPN+ASID check).
10.302 - */
10.303 -static inline int mmu_utlb_sorted_find( sh4addr_t vma )
10.304 -{
10.305 - int low = 0;
10.306 - int high = mmu_utlb_entries;
10.307 - uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;
10.308 -
10.309 - mmu_urc++;
10.310 - if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
10.311 - mmu_urc = 0;
10.312 - }
10.313 -
10.314 - while( low != high ) {
10.315 - int posn = (high+low)>>1;
10.316 - int masked = lookup & mmu_utlb_sorted[posn].mask;
10.317 - if( mmu_utlb_sorted[posn].key < masked ) {
10.318 - low = posn+1;
10.319 - } else if( mmu_utlb_sorted[posn].key > masked ) {
10.320 - high = posn;
10.321 - } else {
10.322 - return mmu_utlb_sorted[posn].entryNo;
10.323 - }
10.324 - }
10.325 - return -1;
10.326 -
10.327 -}
10.328 -
10.329 -static void mmu_utlb_insert_entry( int entry )
10.330 -{
10.331 - int low = 0;
10.332 - int high = mmu_utlb_entries;
10.333 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
10.334 -
10.335 - assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
10.336 - /* Find the insertion point */
10.337 - while( low != high ) {
10.338 - int posn = (high+low)>>1;
10.339 - if( mmu_utlb_sorted[posn].key < key ) {
10.340 - low = posn+1;
10.341 - } else if( mmu_utlb_sorted[posn].key > key ) {
10.342 - high = posn;
10.343 - } else {
10.344 - /* Exact match - multi-hit */
10.345 - mmu_utlb_sorted[posn].entryNo = -2;
10.346 - return;
10.347 - }
10.348 - } /* 0 2 4 6 */
10.349 - memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
10.350 - (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
10.351 - mmu_utlb_sorted[low].key = key;
10.352 - mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
10.353 - mmu_utlb_sorted[low].entryNo = entry;
10.354 - mmu_utlb_entries++;
10.355 -}
10.356 -
10.357 -static void mmu_utlb_remove_entry( int entry )
10.358 -{
10.359 - int low = 0;
10.360 - int high = mmu_utlb_entries;
10.361 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
10.362 - while( low != high ) {
10.363 - int posn = (high+low)>>1;
10.364 - if( mmu_utlb_sorted[posn].key < key ) {
10.365 - low = posn+1;
10.366 - } else if( mmu_utlb_sorted[posn].key > key ) {
10.367 - high = posn;
10.368 - } else {
10.369 - if( mmu_utlb_sorted[posn].entryNo == -2 ) {
10.370 - /* Multiple-entry recorded - rebuild the whole table minus entry */
10.371 - int i;
10.372 - mmu_utlb_entries = 0;
10.373 - for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
10.374 - if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
10.375 - mmu_utlb_insert_entry(i);
10.376 - }
10.377 - }
10.378 - } else {
10.379 - mmu_utlb_entries--;
10.380 - memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
10.381 - (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
10.382 - }
10.383 - return;
10.384 - }
10.385 - }
10.386 - assert( 0 && "UTLB key not found!" );
10.387 -}
10.388 -
10.389 -static void mmu_utlb_sorted_reload()
10.390 -{
10.391 - int i;
10.392 - mmu_utlb_entries = 0;
10.393 - for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.394 - if( mmu_utlb[i].flags & TLB_VALID )
10.395 - mmu_utlb_insert_entry( i );
10.396 - }
10.397 -}
10.398 -
10.399 -/* TLB maintanence */
10.400 -
10.401 /**
10.402 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
10.403 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
10.404 */
10.405 void MMU_ldtlb()
10.406 {
10.407 + mmu_urc %= mmu_urb;
10.408 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
10.409 mmu_utlb_remove_entry( mmu_urc );
10.410 mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
10.411 @@ -355,176 +203,539 @@
10.412 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
10.413 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
10.414 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
10.415 - mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
10.416 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
10.417 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
10.418 + mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
10.419 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
10.420 mmu_utlb_insert_entry( mmu_urc );
10.421 }
10.422
10.423 +
10.424 +MMIO_REGION_READ_FN( MMU, reg )
10.425 +{
10.426 + reg &= 0xFFF;
10.427 + switch( reg ) {
10.428 + case MMUCR:
10.429 + mmu_urc %= mmu_urb;
10.430 + return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
10.431 + default:
10.432 + return MMIO_READ( MMU, reg );
10.433 + }
10.434 +}
10.435 +
/**
 * MMU/CCN register write. Each value is masked down to its writable bits
 * before being stored, and register-specific side effects are applied
 * (ASID change, TLB invalidate/enable, storequeue protection, cache and
 * performance-counter control).
 */
MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;   /* read-only version register - discard the write */
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        /* Unpack the soft counters out of the register value */
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        if( mmu_urb == 0 ) {
            /* URB == 0 means the full 64 entries; also guards the
             * mmu_urc %= mmu_urb fixups against division by zero. */
            mmu_urb = 0x40;
        }
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            /* MMUCR is stored here (and again harmlessly at the bottom of
             * the function) so the new AT state is visible before the
             * translation cache is flushed. */
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
10.514 +
10.515 +/********************** 1K Page handling ***********************/
10.516 +/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
10.517 + * effort to manage - we justify this on the basis that most programs won't
10.518 + * actually use 1K pages, so we may as well optimize for the common case.
10.519 + *
10.520 + * Implementation uses an intermediate page entry (the utlb_1k_entry) that
10.521 + * redirects requests to the 'real' page entry. These are allocated on an
10.522 + * as-needed basis, and returned to the pool when all subpages are empty.
10.523 + */
10.524 +static void mmu_utlb_1k_init()
10.525 +{
10.526 + int i;
10.527 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.528 + mmu_utlb_1k_free_list[i] = i;
10.529 + mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
10.530 + }
10.531 + mmu_utlb_1k_free_index = 0;
10.532 +}
10.533 +
10.534 +static struct utlb_1k_entry *mmu_utlb_1k_alloc()
10.535 +{
10.536 + assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
10.537 + struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
10.538 + return entry;
10.539 +}
10.540 +
10.541 +static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
10.542 +{
10.543 + unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
10.544 + assert( entryNo < UTLB_ENTRY_COUNT );
10.545 + assert( mmu_utlb_1k_free_index > 0 );
10.546 + mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
10.547 +}
10.548 +
10.549 +
10.550 +/********************** Address space maintenance *************************/
10.551 +
/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static inline void mmu_urc_fixup()
{
    /* Requires mmu_urb != 0: the MMUCR write handler maps URB==0 to 0x40. */
    mmu_urc %= mmu_urb;
}
10.559 +
10.560 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
10.561 +{
10.562 + int count = (end - start) >> 12;
10.563 + mem_region_fn_t *ptr = &sh4_address_space[start>>12];
10.564 + while( count-- > 0 ) {
10.565 + *ptr++ = fn;
10.566 + }
10.567 +}
10.568 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
10.569 +{
10.570 + int count = (end - start) >> 12;
10.571 + mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
10.572 + while( count-- > 0 ) {
10.573 + *ptr++ = fn;
10.574 + }
10.575 +}
10.576 +
10.577 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
10.578 +{
10.579 + int i;
10.580 + if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
10.581 + /* TLB on */
10.582 + sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
10.583 + sh4_address_space[(page|0xA0000000)>>12] = fn;
10.584 + /* Scan UTLB and update any direct-referencing entries */
10.585 + } else {
10.586 + /* Direct map to U0, P0, P1, P2, P3 */
10.587 + for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
10.588 + sh4_address_space[(page|i)>>12] = fn;
10.589 + }
10.590 + for( i=0; i < 0x80000000; i+= 0x20000000 ) {
10.591 + sh4_user_address_space[(page|i)>>12] = fn;
10.592 + }
10.593 + }
10.594 +}
10.595 +
/**
 * Switch the primary address-space tables between TLB-on and TLB-off modes.
 * TLB on: the translatable regions (U0/P0, P3) and the storequeue windows
 * default to the tlb-miss handler, then valid UTLB entries are mapped back
 * in. TLB off: the tables mirror the external address space directly.
 */
static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr, *uptr;
    int i;

    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        /* Storequeue targets also fault until mapped through the TLB */
        for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space;
             i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
            *ptr++ = &mem_region_tlb_miss;
            *uptr++ = &mem_region_tlb_miss;
        }
        mmu_utlb_register_all();
    } else {
        /* Copy the external map into each 512MB mirror: 7 regions
         * (0x00000000-0xE0000000) privileged, 4 regions (U0) for user mode. */
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
    }
}
10.620 +
10.621 +static void mmu_set_storequeue_protected( int protected )
10.622 +{
10.623 + if( protected ) {
10.624 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
10.625 + } else {
10.626 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
10.627 + }
10.628 +}
10.629 +
10.630 +static void mmu_set_tlb_asid( uint32_t asid )
10.631 +{
10.632 + /* Scan for pages that need to be remapped */
10.633 + int i;
10.634 + if( IS_SV_ENABLED() ) {
10.635 + // FIXME: Priv pages don't change - only user pages are mapped in/out
10.636 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.637 + if( mmu_utlb[i].flags & TLB_VALID ) {
10.638 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
10.639 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
10.640 + mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
10.641 + get_tlb_size_pages(mmu_utlb[i].flags) );
10.642 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
10.643 + mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
10.644 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
10.645 + get_tlb_size_pages(mmu_utlb[i].flags) );
10.646 + }
10.647 + }
10.648 + }
10.649 + }
10.650 + } else {
10.651 + // Remap both Priv+user pages
10.652 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.653 + if( mmu_utlb[i].flags & TLB_VALID ) {
10.654 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
10.655 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
10.656 + mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
10.657 + get_tlb_size_pages(mmu_utlb[i].flags) );
10.658 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
10.659 + mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
10.660 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
10.661 + get_tlb_size_pages(mmu_utlb[i].flags) );
10.662 + }
10.663 + }
10.664 + }
10.665 + }
10.666 + }
10.667 +
10.668 + mmu_asid = asid;
10.669 +}
10.670 +
10.671 +static uint32_t get_tlb_size_mask( uint32_t flags )
10.672 +{
10.673 + switch( flags & TLB_SIZE_MASK ) {
10.674 + case TLB_SIZE_1K: return MASK_1K;
10.675 + case TLB_SIZE_4K: return MASK_4K;
10.676 + case TLB_SIZE_64K: return MASK_64K;
10.677 + case TLB_SIZE_1M: return MASK_1M;
10.678 + default: return 0; /* Unreachable */
10.679 + }
10.680 +}
10.681 +static uint32_t get_tlb_size_pages( uint32_t flags )
10.682 +{
10.683 + switch( flags & TLB_SIZE_MASK ) {
10.684 + case TLB_SIZE_1K: return 0;
10.685 + case TLB_SIZE_4K: return 1;
10.686 + case TLB_SIZE_64K: return 16;
10.687 + case TLB_SIZE_1M: return 256;
10.688 + default: return 0; /* Unreachable */
10.689 + }
10.690 +}
10.691 +
/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @param priv_page handler for privileged accesses, or NULL to leave the
 *        privileged map untouched (user-only remap, eg on ASID change).
 * @param user_page handler for user accesses, or NULL if not user-visible.
 * @param start_addr first virtual address covered (page aligned).
 * @param npages number of 4K pages, or 0 for a single 1K subpage.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping - target the dedicated SQ tables instead */
        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) {
        /* 1K page: route the enclosing 4K native page through an
         * intermediate utlb_1k_entry that splits it into four subpages. */
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;   /* which of the 4 subpages */
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == &mem_region_tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = &mem_region_tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = &mem_region_tlb_multihit;
            }
        }

    } else {
        /* 4K or larger: fill the page tables directly; any slot not
         * currently at tlb-miss is a multi-hit. */
        if( user_page == NULL ) {
            /* Privileged mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == &mem_region_tlb_miss ) {
                    *ptr++ = priv_page;
                } else {
                    mapping_ok = FALSE;
                    *ptr++ = &mem_region_tlb_multihit;
                }
            }
        } else if( priv_page == NULL ) {
            /* User mapping only (eg ASID change remap) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == &mem_region_tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = &mem_region_tlb_multihit;
                }
            }
        } else {
            for( i=0; i<npages; i++ ) {
                if( *ptr == &mem_region_tlb_miss ) {
                    *ptr++ = priv_page;
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *ptr++ = &mem_region_tlb_multihit;
                    *uptr++ = &mem_region_tlb_multihit;
                }
            }
        }
    }
    return mapping_ok;
}
10.784 +
/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping - target the dedicated SQ tables instead */
        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE;   /* P3 has no user-mode mapping to clear */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        /* NOTE(review): the user subpage is cleared even when unmap_user is
         * FALSE - verify this is intended for the SV-enabled path. */
        ent->subpages[idx] = &mem_region_tlb_miss;
        ent->user_subpages[idx] = &mem_region_tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else {
        if( !unmap_user ) {
            /* Privileged (un)mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == &mem_region_tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = &mem_region_tlb_miss;
            }
        } else {
            /* Clear both privileged and user views */
            for( i=0; i<npages; i++ ) {
                if( *ptr == &mem_region_tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = &mem_region_tlb_miss;
                *uptr++ = &mem_region_tlb_miss;
            }
        }
    }
    return unmapping_ok;
}
10.854 +
/**
 * Install UTLB entry 'entry' into the address-space tables: select the
 * user-mode view, wire the write hooks for read-only / not-yet-dirty
 * pages, build the page vtable, and map the pages in if the entry is
 * visible under the current ASID / SV settings.
 */
static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    /* User-mode view: pages without TLB_USERMODE trap via the protected region */
    if( (ent->flags & TLB_USERMODE) == 0 ) {
        upage = &mem_region_user_protected;
    } else {
        upage = page;
    }
    mmu_utlb_pages[entry].user_fn = upage;

    if( (ent->flags & TLB_WRITABLE) == 0 ) {
        /* Read-only page: all writes raise the protection fault */
        page->write_long = (mem_write_fn_t)tlb_protected_write;
        page->write_word = (mem_write_fn_t)tlb_protected_write;
        page->write_byte = (mem_write_fn_t)tlb_protected_write;
        page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
    } else if( (ent->flags & TLB_DIRTY) == 0 ) {
        /* Writable but clean: first write must raise the initial-write fault */
        page->write_long = (mem_write_fn_t)tlb_initial_write;
        page->write_word = (mem_write_fn_t)tlb_initial_write;
        page->write_byte = (mem_write_fn_t)tlb_initial_write;
        page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
    } else {
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
    }
    /* NOTE(review): mmu_utlb_init_vtable runs after the write hooks are
     * assigned - assumed (via the FALSE flag) to leave the write slots
     * untouched; confirm against mmux86.c. */

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        /* Wrong ASID but SV mode maps all privileged pages regardless */
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}
10.893 +
10.894 +static void mmu_utlb_remove_entry( int entry )
10.895 +{
10.896 + int i, j;
10.897 + struct utlb_entry *ent = &mmu_utlb[entry];
10.898 + sh4addr_t start_addr = ent->vpn&ent->mask;
10.899 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
10.900 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
10.901 + gboolean unmap_user;
10.902 + int npages = get_tlb_size_pages(ent->flags);
10.903 +
10.904 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
10.905 + unmap_user = TRUE;
10.906 + } else if( IS_SV_ENABLED() ) {
10.907 + unmap_user = FALSE;
10.908 + } else {
10.909 + return; // Not mapped
10.910 + }
10.911 +
10.912 + gboolean clean_unmap = mmu_utlb_unmap_pages( unmap_user, start_addr, npages );
10.913 +
10.914 + if( !clean_unmap ) {
10.915 + /* Multi-hit: rescan the UTLB for the other overlapping entries and remap them.
10.916 + * NOTE(review): the rescan loop body below is empty - remap not yet implemented. */
10.917 + for( j=0; j<UTLB_ENTRY_COUNT; j++ ) {
10.918 + uint32_t mask = MIN(mmu_utlb[j].mask, ent->mask);
10.919 + if( j != entry && (start_addr & mask) == (mmu_utlb[j].vpn & mask) ) {
10.920 +
10.921 + }
10.922 + }
10.923 + }
10.924 +}
10.925 +
10.926 +static void mmu_utlb_register_all()
10.927 +{
10.928 + int i;
10.929 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.930 + if( mmu_utlb[i].flags & TLB_VALID )
10.931 + mmu_utlb_insert_entry( i );
10.932 + }
10.933 +}
10.934 +
10.935 static void mmu_invalidate_tlb()
10.936 {
10.937 int i;
10.938 for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
10.939 mmu_itlb[i].flags &= (~TLB_VALID);
10.940 }
10.941 + if( IS_TLB_ENABLED() ) {
10.942 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.943 + if( mmu_utlb[i].flags & TLB_VALID ) {
10.944 + mmu_utlb_remove_entry( i );
10.945 + }
10.946 + }
10.947 + }
10.948 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
10.949 mmu_utlb[i].flags &= (~TLB_VALID);
10.950 }
10.951 - mmu_utlb_entries = 0;
10.952 -}
10.953 -
10.954 -#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
10.955 -
10.956 -int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
10.957 -{
10.958 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.959 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
10.960 -}
10.961 -int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
10.962 -{
10.963 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.964 - return (ent->ppn & 0x1FFFFC00) | ent->flags;
10.965 -}
10.966 -
10.967 -void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
10.968 -{
10.969 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.970 - ent->vpn = val & 0xFFFFFC00;
10.971 - ent->asid = val & 0x000000FF;
10.972 - ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
10.973 -}
10.974 -
10.975 -void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
10.976 -{
10.977 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.978 - ent->ppn = val & 0x1FFFFC00;
10.979 - ent->flags = val & 0x00001DA;
10.980 - ent->mask = get_mask_for_flags(val);
10.981 - if( ent->ppn >= 0x1C000000 )
10.982 - ent->ppn |= 0xE0000000;
10.983 -}
10.984 -
10.985 -#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
10.986 -#define UTLB_ASSOC(addr) (addr&0x80)
10.987 -#define UTLB_DATA2(addr) (addr&0x00800000)
10.988 -
10.989 -int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
10.990 -{
10.991 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.992 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
10.993 - ((ent->flags & TLB_DIRTY)<<7);
10.994 -}
10.995 -int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
10.996 -{
10.997 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.998 - if( UTLB_DATA2(addr) ) {
10.999 - return ent->pcmcia;
10.1000 - } else {
10.1001 - return (ent->ppn&0x1FFFFC00) | ent->flags;
10.1002 - }
10.1003 -}
10.1004 -
10.1005 -/**
10.1006 - * Find a UTLB entry for the associative TLB write - same as the normal
10.1007 - * lookup but ignores the valid bit.
10.1008 - */
10.1009 -static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
10.1010 -{
10.1011 - int result = -1;
10.1012 - unsigned int i;
10.1013 - for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
10.1014 - if( (mmu_utlb[i].flags & TLB_VALID) &&
10.1015 - ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
10.1016 - ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
10.1017 - if( result != -1 ) {
10.1018 - fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
10.1019 - return -2;
10.1020 - }
10.1021 - result = i;
10.1022 - }
10.1023 - }
10.1024 - return result;
10.1025 -}
10.1026 -
10.1027 -/**
10.1028 - * Find a ITLB entry for the associative TLB write - same as the normal
10.1029 - * lookup but ignores the valid bit.
10.1030 - */
10.1031 -static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
10.1032 -{
10.1033 - int result = -1;
10.1034 - unsigned int i;
10.1035 - for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
10.1036 - if( (mmu_itlb[i].flags & TLB_VALID) &&
10.1037 - ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
10.1038 - ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
10.1039 - if( result != -1 ) {
10.1040 - return -2;
10.1041 - }
10.1042 - result = i;
10.1043 - }
10.1044 - }
10.1045 - return result;
10.1046 -}
10.1047 -
10.1048 -void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
10.1049 -{
10.1050 - if( UTLB_ASSOC(addr) ) {
10.1051 - int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
10.1052 - if( utlb >= 0 ) {
10.1053 - struct utlb_entry *ent = &mmu_utlb[utlb];
10.1054 - uint32_t old_flags = ent->flags;
10.1055 - ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
10.1056 - ent->flags |= (val & TLB_VALID);
10.1057 - ent->flags |= ((val & 0x200)>>7);
10.1058 - if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
10.1059 - mmu_utlb_remove_entry( utlb );
10.1060 - } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
10.1061 - mmu_utlb_insert_entry( utlb );
10.1062 - }
10.1063 - }
10.1064 -
10.1065 - int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
10.1066 - if( itlb >= 0 ) {
10.1067 - struct itlb_entry *ent = &mmu_itlb[itlb];
10.1068 - ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
10.1069 - }
10.1070 -
10.1071 - if( itlb == -2 || utlb == -2 ) {
10.1072 - MMU_TLB_MULTI_HIT_ERROR(addr);
10.1073 - return;
10.1074 - }
10.1075 - } else {
10.1076 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.1077 - if( ent->flags & TLB_VALID )
10.1078 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
10.1079 - ent->vpn = (val & 0xFFFFFC00);
10.1080 - ent->asid = (val & 0xFF);
10.1081 - ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
10.1082 - ent->flags |= (val & TLB_VALID);
10.1083 - ent->flags |= ((val & 0x200)>>7);
10.1084 - if( ent->flags & TLB_VALID )
10.1085 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
10.1086 - }
10.1087 -}
10.1088 -
10.1089 -void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
10.1090 -{
10.1091 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.1092 - if( UTLB_DATA2(addr) ) {
10.1093 - ent->pcmcia = val & 0x0000000F;
10.1094 - } else {
10.1095 - if( ent->flags & TLB_VALID )
10.1096 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
10.1097 - ent->ppn = (val & 0x1FFFFC00);
10.1098 - ent->flags = (val & 0x000001FF);
10.1099 - ent->mask = get_mask_for_flags(val);
10.1100 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
10.1101 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
10.1102 - if( ent->flags & TLB_VALID )
10.1103 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
10.1104 - }
10.1105 }
10.1106
10.1107 /******************************************************************************/
10.1108 @@ -532,9 +743,22 @@
10.1109 /******************************************************************************/
10.1110
10.1111 /**
10.1112 - * The translations are excessively complicated, but unfortunately it's a
10.1113 - * complicated system. TODO: make this not be painfully slow.
10.1114 + * Translate a 32-bit address into a UTLB entry number. Does not check for
10.1115 + * page protection etc.
10.1116 + * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
10.1117 */
10.1118 +int mmu_utlb_entry_for_vpn( uint32_t vpn )
10.1119 +{
10.1120 + mem_region_fn_t fn = sh4_address_space[vpn>>12];
10.1121 + if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
10.1122 + return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
10.1123 + } else if( fn == &mem_region_tlb_multihit ) {
10.1124 + return -2;
10.1125 + } else {
10.1126 + return -1;
10.1127 + }
10.1128 +}
10.1129 +
10.1130
10.1131 /**
10.1132 * Perform the actual utlb lookup w/ asid matching.
10.1133 @@ -656,7 +880,7 @@
10.1134 }
10.1135
10.1136 if( result == -1 ) {
10.1137 - int utlbEntry = mmu_utlb_sorted_find( vpn );
10.1138 + int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
10.1139 if( utlbEntry < 0 ) {
10.1140 return utlbEntry;
10.1141 } else {
10.1142 @@ -717,130 +941,6 @@
10.1143 return result;
10.1144 }
10.1145
10.1146 -#ifdef HAVE_FRAME_ADDRESS
10.1147 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
10.1148 -#else
10.1149 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
10.1150 -#endif
10.1151 -{
10.1152 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
10.1153 - if( addr & 0x80000000 ) {
10.1154 - if( IS_SH4_PRIVMODE() ) {
10.1155 - if( addr >= 0xE0000000 ) {
10.1156 - return addr; /* P4 - passthrough */
10.1157 - } else if( addr < 0xC0000000 ) {
10.1158 - /* P1, P2 regions are pass-through (no translation) */
10.1159 - return VMA_TO_EXT_ADDR(addr);
10.1160 - }
10.1161 - } else {
10.1162 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
10.1163 - ((mmucr&MMUCR_SQMD) == 0) ) {
10.1164 - /* Conditional user-mode access to the store-queue (no translation) */
10.1165 - return addr;
10.1166 - }
10.1167 - MMU_READ_ADDR_ERROR();
10.1168 - RETURN_VIA(exc);
10.1169 - }
10.1170 - }
10.1171 -
10.1172 - if( (mmucr & MMUCR_AT) == 0 ) {
10.1173 - return VMA_TO_EXT_ADDR(addr);
10.1174 - }
10.1175 -
10.1176 - /* If we get this far, translation is required */
10.1177 - int entryNo;
10.1178 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
10.1179 - entryNo = mmu_utlb_sorted_find( addr );
10.1180 - } else {
10.1181 - entryNo = mmu_utlb_lookup_vpn( addr );
10.1182 - }
10.1183 -
10.1184 - switch(entryNo) {
10.1185 - case -1:
10.1186 - MMU_TLB_READ_MISS_ERROR(addr);
10.1187 - RETURN_VIA(exc);
10.1188 - case -2:
10.1189 - MMU_TLB_MULTI_HIT_ERROR(addr);
10.1190 - RETURN_VIA(exc);
10.1191 - default:
10.1192 - if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
10.1193 - !IS_SH4_PRIVMODE() ) {
10.1194 - /* protection violation */
10.1195 - MMU_TLB_READ_PROT_ERROR(addr);
10.1196 - RETURN_VIA(exc);
10.1197 - }
10.1198 -
10.1199 - /* finally generate the target address */
10.1200 - return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
10.1201 - (addr & (~mmu_utlb[entryNo].mask));
10.1202 - }
10.1203 -}
10.1204 -
10.1205 -#ifdef HAVE_FRAME_ADDRESS
10.1206 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
10.1207 -#else
10.1208 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
10.1209 -#endif
10.1210 -{
10.1211 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
10.1212 - if( addr & 0x80000000 ) {
10.1213 - if( IS_SH4_PRIVMODE() ) {
10.1214 - if( addr >= 0xE0000000 ) {
10.1215 - return addr; /* P4 - passthrough */
10.1216 - } else if( addr < 0xC0000000 ) {
10.1217 - /* P1, P2 regions are pass-through (no translation) */
10.1218 - return VMA_TO_EXT_ADDR(addr);
10.1219 - }
10.1220 - } else {
10.1221 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
10.1222 - ((mmucr&MMUCR_SQMD) == 0) ) {
10.1223 - /* Conditional user-mode access to the store-queue (no translation) */
10.1224 - return addr;
10.1225 - }
10.1226 - MMU_WRITE_ADDR_ERROR();
10.1227 - RETURN_VIA(exc);
10.1228 - }
10.1229 - }
10.1230 -
10.1231 - if( (mmucr & MMUCR_AT) == 0 ) {
10.1232 - return VMA_TO_EXT_ADDR(addr);
10.1233 - }
10.1234 -
10.1235 - /* If we get this far, translation is required */
10.1236 - int entryNo;
10.1237 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
10.1238 - entryNo = mmu_utlb_sorted_find( addr );
10.1239 - } else {
10.1240 - entryNo = mmu_utlb_lookup_vpn( addr );
10.1241 - }
10.1242 -
10.1243 - switch(entryNo) {
10.1244 - case -1:
10.1245 - MMU_TLB_WRITE_MISS_ERROR(addr);
10.1246 - RETURN_VIA(exc);
10.1247 - case -2:
10.1248 - MMU_TLB_MULTI_HIT_ERROR(addr);
10.1249 - RETURN_VIA(exc);
10.1250 - default:
10.1251 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
10.1252 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
10.1253 - /* protection violation */
10.1254 - MMU_TLB_WRITE_PROT_ERROR(addr);
10.1255 - RETURN_VIA(exc);
10.1256 - }
10.1257 -
10.1258 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
10.1259 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
10.1260 - RETURN_VIA(exc);
10.1261 - }
10.1262 -
10.1263 - /* finally generate the target address */
10.1264 - sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
10.1265 - (addr & (~mmu_utlb[entryNo].mask));
10.1266 - return pma;
10.1267 - }
10.1268 -}
10.1269 -
10.1270 /**
10.1271 * Update the icache for an untranslated address
10.1272 */
10.1273 @@ -886,7 +986,7 @@
10.1274 mmu_update_icache_phys(addr);
10.1275 return TRUE;
10.1276 } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
10.1277 - MMU_READ_ADDR_ERROR();
10.1278 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
10.1279 return FALSE;
10.1280 }
10.1281 }
10.1282 @@ -903,7 +1003,7 @@
10.1283 entryNo = mmu_itlb_lookup_vpn( addr );
10.1284 } else {
10.1285 if( addr & 0x80000000 ) {
10.1286 - MMU_READ_ADDR_ERROR();
10.1287 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
10.1288 return FALSE;
10.1289 }
10.1290
10.1291 @@ -916,17 +1016,17 @@
10.1292 entryNo = mmu_itlb_lookup_vpn_asid( addr );
10.1293
10.1294 if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
10.1295 - MMU_TLB_READ_PROT_ERROR(addr);
10.1296 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
10.1297 return FALSE;
10.1298 }
10.1299 }
10.1300
10.1301 switch(entryNo) {
10.1302 case -1:
10.1303 - MMU_TLB_READ_MISS_ERROR(addr);
10.1304 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
10.1305 return FALSE;
10.1306 case -2:
10.1307 - MMU_TLB_MULTI_HIT_ERROR(addr);
10.1308 + RAISE_TLB_MULTIHIT_ERROR(addr);
10.1309 return FALSE;
10.1310 default:
10.1311 sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
10.1312 @@ -985,46 +1085,309 @@
10.1313 ext_address_space[target>>12]->write_burst( target, src );
10.1314 }
10.1315
10.1316 -gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
10.1317 +void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
10.1318 {
10.1319 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
10.1320 int queue = (addr&0x20)>>2;
10.1321 sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
10.1322 sh4addr_t target;
10.1323 /* Store queue operation */
10.1324 -
10.1325 - int entryNo;
10.1326 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
10.1327 - entryNo = mmu_utlb_lookup_vpn_asid( addr );
10.1328 - } else {
10.1329 - entryNo = mmu_utlb_lookup_vpn( addr );
10.1330 - }
10.1331 - switch(entryNo) {
10.1332 - case -1:
10.1333 - MMU_TLB_WRITE_MISS_ERROR(addr);
10.1334 - return FALSE;
10.1335 - case -2:
10.1336 - MMU_TLB_MULTI_HIT_ERROR(addr);
10.1337 - return FALSE;
10.1338 - default:
10.1339 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
10.1340 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
10.1341 - /* protection violation */
10.1342 - MMU_TLB_WRITE_PROT_ERROR(addr);
10.1343 - return FALSE;
10.1344 - }
10.1345 -
10.1346 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
10.1347 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
10.1348 - return FALSE;
10.1349 - }
10.1350 -
10.1351 - /* finally generate the target address */
10.1352 - target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
10.1353 - (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
10.1354 - }
10.1355 -
10.1356 - ext_address_space[target>>12]->write_burst( target, src );
10.1357 - return TRUE;
10.1358 + storequeue_address_space[(addr&0x03FFFFFE0)>>12]->write_burst( addr, src);
10.1359 }
10.1360
10.1361 +/********************** TLB Direct-Access Regions ***************************/
10.1362 +#ifdef HAVE_FRAME_ADDRESS
10.1363 +#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
10.1364 +#else
10.1365 +#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
10.1366 +#endif
10.1367 +
10.1368 +
10.1369 +#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
10.1370 +
10.1371 +int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
10.1372 +{
10.1373 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.1374 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
10.1375 +}
10.1376 +
10.1377 +void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
10.1378 +{
10.1379 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.1380 + ent->vpn = val & 0xFFFFFC00;
10.1381 + ent->asid = val & 0x000000FF;
10.1382 + ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
10.1383 +}
10.1384 +
10.1385 +int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
10.1386 +{
10.1387 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.1388 + return (ent->ppn & 0x1FFFFC00) | ent->flags;
10.1389 +}
10.1390 +
10.1391 +void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
10.1392 +{
10.1393 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
10.1394 + ent->ppn = val & 0x1FFFFC00;
10.1395 + ent->flags = val & 0x00001DA;
10.1396 + ent->mask = get_tlb_size_mask(val);
10.1397 + if( ent->ppn >= 0x1C000000 )
10.1398 + ent->ppn |= 0xE0000000;
10.1399 +}
10.1400 +
10.1401 +#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
10.1402 +#define UTLB_ASSOC(addr) (addr&0x80)
10.1403 +#define UTLB_DATA2(addr) (addr&0x00800000)
10.1404 +
10.1405 +int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
10.1406 +{
10.1407 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.1408 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
10.1409 + ((ent->flags & TLB_DIRTY)<<7);
10.1410 +}
10.1411 +int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
10.1412 +{
10.1413 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.1414 + if( UTLB_DATA2(addr) ) {
10.1415 + return ent->pcmcia;
10.1416 + } else {
10.1417 + return (ent->ppn&0x1FFFFC00) | ent->flags;
10.1418 + }
10.1419 +}
10.1420 +
10.1421 +/**
10.1422 + * Find a UTLB entry for the associative TLB write - same as the normal
10.1423 + * lookup but ignores the valid bit.
10.1424 + */
10.1425 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
10.1426 +{
10.1427 + int result = -1;
10.1428 + unsigned int i;
10.1429 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
10.1430 + if( (mmu_utlb[i].flags & TLB_VALID) &&
10.1431 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
10.1432 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
10.1433 + if( result != -1 ) {
10.1434 + fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
10.1435 + return -2;
10.1436 + }
10.1437 + result = i;
10.1438 + }
10.1439 + }
10.1440 + return result;
10.1441 +}
10.1442 +
10.1443 +/**
10.1444 + * Find an ITLB entry for the associative TLB write - same as the normal
10.1445 + * lookup but ignores the valid bit.
10.1446 + */
10.1447 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
10.1448 +{
10.1449 + int result = -1;
10.1450 + unsigned int i;
10.1451 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
10.1452 + if( (mmu_itlb[i].flags & TLB_VALID) &&
10.1453 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
10.1454 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
10.1455 + if( result != -1 ) {
10.1456 + return -2;
10.1457 + }
10.1458 + result = i;
10.1459 + }
10.1460 + }
10.1461 + return result;
10.1462 +}
10.1463 +
10.1464 +void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
10.1465 +{
10.1466 + if( UTLB_ASSOC(addr) ) {
10.1467 + int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
10.1468 + if( utlb >= 0 ) {
10.1469 + struct utlb_entry *ent = &mmu_utlb[utlb];
10.1470 + uint32_t old_flags = ent->flags;
10.1471 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
10.1472 + ent->flags |= (val & TLB_VALID);
10.1473 + ent->flags |= ((val & 0x200)>>7);
10.1474 + if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
10.1475 + if( old_flags & TLB_VALID )
10.1476 + mmu_utlb_remove_entry( utlb );
10.1477 + if( ent->flags & TLB_VALID )
10.1478 + mmu_utlb_insert_entry( utlb );
10.1479 + }
10.1480 + }
10.1481 +
10.1482 + int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
10.1483 + if( itlb >= 0 ) {
10.1484 + struct itlb_entry *ent = &mmu_itlb[itlb];
10.1485 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
10.1486 + }
10.1487 +
10.1488 + if( itlb == -2 || utlb == -2 ) {
10.1489 + RAISE_TLB_MULTIHIT_ERROR(addr);
10.1490 + EXCEPTION_EXIT();
10.1491 + return;
10.1492 + }
10.1493 + } else {
10.1494 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.1495 + if( ent->flags & TLB_VALID )
10.1496 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
10.1497 + ent->vpn = (val & 0xFFFFFC00);
10.1498 + ent->asid = (val & 0xFF);
10.1499 + ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
10.1500 + ent->flags |= (val & TLB_VALID);
10.1501 + ent->flags |= ((val & 0x200)>>7);
10.1502 + if( ent->flags & TLB_VALID )
10.1503 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
10.1504 + }
10.1505 +}
10.1506 +
10.1507 +void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
10.1508 +{
10.1509 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
10.1510 + if( UTLB_DATA2(addr) ) {
10.1511 + ent->pcmcia = val & 0x0000000F;
10.1512 + } else {
10.1513 + if( ent->flags & TLB_VALID )
10.1514 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
10.1515 + ent->ppn = (val & 0x1FFFFC00);
10.1516 + ent->flags = (val & 0x000001FF);
10.1517 + ent->mask = get_tlb_size_mask(val);
10.1518 + if( ent->flags & TLB_VALID )
10.1519 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
10.1520 + }
10.1521 +}
10.1522 +
10.1523 +struct mem_region_fn p4_region_itlb_addr = {
10.1524 + mmu_itlb_addr_read, mmu_itlb_addr_write,
10.1525 + mmu_itlb_addr_read, mmu_itlb_addr_write,
10.1526 + mmu_itlb_addr_read, mmu_itlb_addr_write,
10.1527 + unmapped_read_burst, unmapped_write_burst };
10.1528 +struct mem_region_fn p4_region_itlb_data = {
10.1529 + mmu_itlb_data_read, mmu_itlb_data_write,
10.1530 + mmu_itlb_data_read, mmu_itlb_data_write,
10.1531 + mmu_itlb_data_read, mmu_itlb_data_write,
10.1532 + unmapped_read_burst, unmapped_write_burst };
10.1533 +struct mem_region_fn p4_region_utlb_addr = {
10.1534 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
10.1535 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
10.1536 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
10.1537 + unmapped_read_burst, unmapped_write_burst };
10.1538 +struct mem_region_fn p4_region_utlb_data = {
10.1539 + mmu_utlb_data_read, mmu_utlb_data_write,
10.1540 + mmu_utlb_data_read, mmu_utlb_data_write,
10.1541 + mmu_utlb_data_read, mmu_utlb_data_write,
10.1542 + unmapped_read_burst, unmapped_write_burst };
10.1543 +
10.1544 +/********************** Error regions **************************/
10.1545 +
10.1546 +static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
10.1547 +{
10.1548 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
10.1549 + EXCEPTION_EXIT();
10.1550 +}
10.1551 +
10.1552 +static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
10.1553 +{
10.1554 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
10.1555 + EXCEPTION_EXIT();
10.1556 +}
10.1557 +
10.1558 +static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
10.1559 +{
10.1560 + RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
10.1561 + EXCEPTION_EXIT();
10.1562 +}
10.1563 +
10.1564 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
10.1565 +{
10.1566 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
10.1567 + EXCEPTION_EXIT();
10.1568 +}
10.1569 +
10.1570 +static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
10.1571 +{
10.1572 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
10.1573 + EXCEPTION_EXIT();
10.1574 +}
10.1575 +
10.1576 +static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
10.1577 +{
10.1578 + RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
10.1579 + EXCEPTION_EXIT();
10.1580 +}
10.1581 +
10.1582 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
10.1583 +{
10.1584 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
10.1585 + EXCEPTION_EXIT();
10.1586 +}
10.1587 +
10.1588 +static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
10.1589 +{
10.1590 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
10.1591 + EXCEPTION_EXIT();
10.1592 +}
10.1593 +
10.1594 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
10.1595 +{
10.1596 + RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
10.1597 + EXCEPTION_EXIT();
10.1598 +}
10.1599 +
10.1600 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
10.1601 +{
10.1602 + RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
10.1603 + EXCEPTION_EXIT();
10.1604 +}
10.1605 +
10.1606 +static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
10.1607 +{
10.1608 + MMIO_WRITE(MMU, TEA, addr);
10.1609 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
10.1610 + sh4_raise_reset(EXC_TLB_MULTI_HIT);
10.1611 + EXCEPTION_EXIT();
10.1612 +}
10.1613 +
10.1614 +static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
10.1615 +{
10.1616 + MMIO_WRITE(MMU, TEA, addr);
10.1617 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
10.1618 + sh4_raise_reset(EXC_TLB_MULTI_HIT);
10.1619 + EXCEPTION_EXIT();
10.1620 +}
10.1621 +static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
10.1622 +{
10.1623 + MMIO_WRITE(MMU, TEA, addr);
10.1624 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
10.1625 + sh4_raise_reset(EXC_TLB_MULTI_HIT);
10.1626 + EXCEPTION_EXIT();
10.1627 +}
10.1628 +
10.1629 +/**
10.1630 + * Note: Per sec 4.6.4 of the SH7750 manual, SQ (store queue) user-mode access
10.1630 + * is governed by MMUCR.SQMD. (NOTE: original comment appears truncated here -
10.1630 + * reconstruction to be confirmed against the manual.)
10.1631 + */
10.1632 +struct mem_region_fn mem_region_address_error = {
10.1633 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
10.1634 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
10.1635 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
10.1636 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
10.1637 +
10.1638 +struct mem_region_fn mem_region_tlb_miss = {
10.1639 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
10.1640 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
10.1641 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
10.1642 + (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
10.1643 +
10.1644 +struct mem_region_fn mem_region_user_protected = {
10.1645 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
10.1646 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
10.1647 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
10.1648 + (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
10.1649 +
10.1650 +struct mem_region_fn mem_region_tlb_multihit = {
10.1651 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
10.1652 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
10.1653 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
10.1654 + (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };
10.1655 +
10.1656 +
10.1657 +
10.1658 \ No newline at end of file
11.1 --- a/src/sh4/mmu.h Sat Dec 27 04:09:17 2008 +0000
11.2 +++ b/src/sh4/mmu.h Sat Jan 03 03:30:26 2009 +0000
11.3 @@ -29,6 +29,20 @@
11.4 #define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
11.5
11.6 /************************** UTLB/ITLB Definitions ***************************/
11.7 +/* mmucr register bits */
11.8 +#define MMUCR_AT 0x00000001 /* Address Translation enabled */
11.9 +#define MMUCR_TI 0x00000004 /* TLB invalidate (always read as 0) */
11.10 +#define MMUCR_SV 0x00000100 /* Single Virtual mode=1 / multiple virtual=0 */
11.11 +#define MMUCR_SQMD 0x00000200 /* Store queue mode bit (0=user, 1=priv only) */
11.12 +#define MMUCR_URC 0x0000FC00 /* UTLB access counter */
11.13 +#define MMUCR_URB 0x00FC0000 /* UTLB entry boundary */
11.14 +#define MMUCR_LRUI 0xFC000000 /* Least recently used ITLB */
11.15 +#define MMUCR_MASK 0xFCFCFF05
11.16 +#define MMUCR_RMASK 0xFCFCFF01 /* Read mask */
11.17 +
11.18 +#define IS_TLB_ENABLED() (MMIO_READ(MMU, MMUCR)&MMUCR_AT)
11.19 +#define IS_SV_ENABLED() (MMIO_READ(MMU,MMUCR)&MMUCR_SV)
11.20 +
11.21 #define ITLB_ENTRY_COUNT 4
11.22 #define UTLB_ENTRY_COUNT 64
11.23
11.24 @@ -66,14 +80,31 @@
11.25 uint32_t asid; // Process ID
11.26 sh4addr_t ppn; // Physical Page Number
11.27 uint32_t flags;
11.28 - uint32_t pcmcia; // extra pcmcia data - not used
11.29 + uint32_t pcmcia; // extra pcmcia data - not used in this implementation
11.30 };
11.31
11.32 -struct utlb_sort_entry {
11.33 - sh4addr_t key; // Masked VPN + ASID
11.34 - uint32_t mask; // Mask + 0x00FF
11.35 - int entryNo;
11.36 +#define TLB_FUNC_SIZE 48
11.37 +
11.38 +struct utlb_page_entry {
11.39 + struct mem_region_fn fn;
11.40 + mem_region_fn_t user_fn;
11.41 + mem_region_fn_t target;
11.42 + unsigned char code[TLB_FUNC_SIZE*8];
11.43 };
11.44 +
11.45 +struct utlb_1k_entry {
11.46 + struct mem_region_fn fn;
11.47 + struct mem_region_fn user_fn;
11.48 + struct mem_region_fn *subpages[4];
11.49 + struct mem_region_fn *user_subpages[4];
11.50 + unsigned char code[TLB_FUNC_SIZE*16];
11.51 +};
11.52 +
11.53 +void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable );
11.54 +void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *ent );
11.55 +
11.56 +extern uint32_t mmu_urc;
11.57 +extern uint32_t mmu_urb;
11.58
11.59 #ifdef __cplusplus
11.60 }
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
12.2 +++ b/src/sh4/mmux86.c Sat Jan 03 03:30:26 2009 +0000
12.3 @@ -0,0 +1,109 @@
12.4 +/**
12.5 + * $Id: mmux86.c 957 2008-12-27 03:14:59Z nkeynes $
12.6 + *
12.7 + * x86-specific MMU code - this emits simple TLB stubs for TLB indirection.
12.8 + *
12.9 + * Copyright (c) 2008 Nathan Keynes.
12.10 + *
12.11 + * This program is free software; you can redistribute it and/or modify
12.12 + * it under the terms of the GNU General Public License as published by
12.13 + * the Free Software Foundation; either version 2 of the License, or
12.14 + * (at your option) any later version.
12.15 + *
12.16 + * This program is distributed in the hope that it will be useful,
12.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12.19 + * GNU General Public License for more details.
12.20 + */
12.21 +
12.22 +#include "lxdream.h"
12.23 +#include "mem.h"
12.24 +#include "sh4/sh4core.h"
12.25 +#include "sh4/sh4mmio.h"
12.26 +#include "sh4/sh4trans.h"
12.27 +#include "sh4/mmu.h"
12.28 +#include "sh4/x86op.h"
12.29 +
12.30 +#if SIZEOF_VOID_P == 8
12.31 +#define ARG1 R_EDI
12.32 +#define ARG2 R_ESI
12.33 +#define DECODE() \
12.34 + MOV_imm64_r32((uintptr_t)ext_address_space, R_EAX); /* movq ptr, %rax */ \
12.35 + REXW(); OP(0x8B); OP(0x0C); OP(0xC8) /* movq [%rax + %rcx*8], %rcx */
12.36 +#else
12.37 +#define ARG1 R_EAX
12.38 +#define ARG2 R_EDX
12.39 +#define DECODE() \
12.40 + MOV_r32disp32x4_r32( R_ECX, (uintptr_t)ext_address_space, R_ECX );
12.41 +#endif
12.42 +
12.43 +void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
12.44 +{
12.45 + uint32_t mask = ent->mask;
12.46 + uint32_t ppn = ent->ppn & mask;
12.47 + int inc = writable ? 1 : 2;
12.48 + int i;
12.49 +
12.50 + xlat_output = page->code;
12.51 + uint8_t **fn = (uint8_t **)ext_address_space[ppn>>12];
12.52 + uint8_t **out = (uint8_t **)&page->fn;
12.53 +
12.54 + for( i=0; i<8; i+= inc, fn += inc, out += inc ) {
12.55 + *out = xlat_output;
12.56 +#if SIZEOF_VOID_P == 8
12.57 + MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
12.58 + OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
12.59 +#else
12.60 + OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
12.61 +#endif
12.62 + AND_imm32_r32( ~mask, ARG1 ); // 6
12.63 + OR_imm32_r32( ppn, ARG1 ); // 6
12.64 + if( ent->mask >= 0xFFFFF000 ) {
12.65 + // Maps to a single page, so jump directly there
12.66 + int rel = (*fn - xlat_output);
12.67 + JMP_rel( rel ); // 5
12.68 + } else {
12.69 + MOV_r32_r32( ARG1, R_ECX ); // 2
12.70 + SHR_imm8_r32( 12, R_ECX ); // 3
12.71 + DECODE(); // 14
12.72 + JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
12.73 + }
12.74 + }
12.75 +}
12.76 +
12.77 +void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
12.78 +{
12.79 + xlat_output = entry->code;
12.80 + int i;
12.81 + uint8_t **out = (uint8_t **)&entry->fn;
12.82 +
12.83 + for( i=0; i<8; i++, out++ ) {
12.84 + *out = xlat_output;
12.85 + MOV_r32_r32( ARG1, R_ECX );
12.86 + SHR_imm8_r32( 10, R_ECX );
12.87 + AND_imm8s_r32( 0x3, R_ECX );
12.88 +#if SIZEOF_VOID_P == 8
12.89 + MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
12.90 + REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
12.91 +#else
12.92 + MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
12.93 +#endif
12.94 + JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
12.95 + }
12.96 +
12.97 + out = (uint8_t **)&entry->user_fn;
12.98 + for( i=0; i<8; i++, out++ ) {
12.99 + *out = xlat_output;
12.100 + MOV_r32_r32( ARG1, R_ECX );
12.101 + SHR_imm8_r32( 10, R_ECX );
12.102 + AND_imm8s_r32( 0x3, R_ECX );
12.103 +#if SIZEOF_VOID_P == 8
12.104 + MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
12.105 + REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
12.106 +#else
12.107 + MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
12.108 +#endif
12.109 + JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
12.110 + }
12.111 +
12.112 +}
13.1 --- a/src/sh4/sh4.c Sat Dec 27 04:09:17 2008 +0000
13.2 +++ b/src/sh4/sh4.c Sat Jan 03 03:30:26 2009 +0000
13.3 @@ -78,9 +78,9 @@
13.4 void sh4_init(void)
13.5 {
13.6 register_io_regions( mmio_list_sh4mmio );
13.7 + MMU_init();
13.8 TMU_init();
13.9 xlat_cache_init();
13.10 - sh4_mem_init();
13.11 sh4_reset();
13.12 #ifdef ENABLE_SH4STATS
13.13 sh4_stats_reset();
13.14 @@ -167,9 +167,7 @@
13.15 sh4_sleep_run_slice(nanosecs);
13.16 break;
13.17 case CORE_EXIT_FLUSH_ICACHE:
13.18 -#ifdef SH4_TRANSLATOR
13.19 xlat_flush_cache();
13.20 -#endif
13.21 break;
13.22 }
13.23
14.1 --- a/src/sh4/sh4core.h Sat Dec 27 04:09:17 2008 +0000
14.2 +++ b/src/sh4/sh4core.h Sat Jan 03 03:30:26 2009 +0000
14.3 @@ -51,6 +51,9 @@
14.4 extern struct sh4_icache_struct sh4_icache;
14.5
14.6 extern struct mem_region_fn **sh4_address_space;
14.7 +extern struct mem_region_fn **sh4_user_address_space;
14.8 +extern struct mem_region_fn **storequeue_address_space;
14.9 +extern struct mem_region_fn **storequeue_user_address_space;
14.10
14.11 /**
14.12 * Test if a given address is contained in the current icache entry
14.13 @@ -102,11 +105,17 @@
14.14 #define CORE_EXIT_SLEEP 5
14.15
14.16 /**
14.17 - * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
14.18 + * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
14.19 * if address translation has changed)
14.20 */
14.21 #define CORE_EXIT_FLUSH_ICACHE 6
14.22
14.23 +/**
14.24 + * SH4 vm-exit flag - exit the current block following a taken exception. sh4r.spc
14.25 + * is fixed up by recovery rather than sh4r.pc.
14.26 + */
14.27 +#define CORE_EXIT_EXCEPTION 7
14.28 +
14.29 typedef uint32_t (*sh4_run_slice_fn)(uint32_t);
14.30
14.31 /* SH4 module functions */
14.32 @@ -222,7 +231,7 @@
14.33 void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
14.34 int32_t sh4_read_phys_word( sh4addr_t addr );
14.35 void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
14.36 -gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr );
14.37 +void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc );
14.38
14.39 /* SH4 Exceptions */
14.40 #define EXC_POWER_RESET 0x000 /* reset vector */
14.41 @@ -293,7 +302,21 @@
14.42 #define FPULf (sh4r.fpul.f)
14.43 #define FPULi (sh4r.fpul.i)
14.44
14.45 -#define SH4_WRITE_STORE_QUEUE(addr,val) sh4r.store_queue[(addr>>2)&0xF] = val;
14.46 +/**************** SH4 internal memory regions *****************/
14.47 +extern struct mem_region_fn p4_region_storequeue;
14.48 +extern struct mem_region_fn p4_region_itlb_addr;
14.49 +extern struct mem_region_fn p4_region_itlb_data;
14.50 +extern struct mem_region_fn p4_region_utlb_addr;
14.51 +extern struct mem_region_fn p4_region_utlb_data;
14.52 +extern struct mem_region_fn p4_region_icache_addr;
14.53 +extern struct mem_region_fn p4_region_icache_data;
14.54 +extern struct mem_region_fn p4_region_ocache_addr;
14.55 +extern struct mem_region_fn p4_region_ocache_data;
14.56 +extern struct mem_region_fn mem_region_address_error;
14.57 +extern struct mem_region_fn mem_region_tlb_miss;
14.58 +extern struct mem_region_fn mem_region_tlb_multihit;
14.59 +extern struct mem_region_fn mem_region_user_protected;
14.60 +
14.61
14.62 #ifdef __cplusplus
14.63 }
15.1 --- a/src/sh4/sh4core.in Sat Dec 27 04:09:17 2008 +0000
15.2 +++ b/src/sh4/sh4core.in Sat Jan 03 03:30:26 2009 +0000
15.3 @@ -164,23 +164,30 @@
15.4 #define CHECKDEST(p) if( (p) == 0 ) { ERROR( "%08X: Branch/jump to NULL, CPU halted", sh4r.pc ); sh4_core_exit(CORE_EXIT_HALT); return FALSE; }
15.5 #define CHECKSLOTILLEGAL() if(sh4r.in_delay_slot) return sh4_raise_exception(EXC_SLOT_ILLEGAL)
15.6
15.7 +#define ADDRSPACE (IS_SH4_PRIVMODE() ? sh4_address_space : sh4_user_address_space)
15.8 +#define SQADDRSPACE (IS_SH4_PRIVMODE() ? storequeue_address_space : storequeue_user_address_space)
15.9 +
15.10 #ifdef HAVE_FRAME_ADDRESS
15.11 static FASTCALL __attribute__((noinline)) void *__first_arg(void *a, void *b) { return a; }
15.12 #define INIT_EXCEPTIONS(label) goto *__first_arg(&&fnstart,&&label); fnstart:
15.13 -#define MMU_TRANSLATE_READ( addr ) memtmp = mmu_vma_to_phys_read(addr, &&except )
15.14 -#define MMU_TRANSLATE_WRITE( addr ) memtmp = mmu_vma_to_phys_write(addr, &&except )
15.15 +#define MEM_READ_BYTE( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_byte)((addr), &&except)
15.16 +#define MEM_READ_WORD( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_word)((addr), &&except)
15.17 +#define MEM_READ_LONG( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_long)((addr), &&except)
15.18 +#define MEM_WRITE_BYTE( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_byte)((addr), (val), &&except)
15.19 +#define MEM_WRITE_WORD( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_word)((addr), (val), &&except)
15.20 +#define MEM_WRITE_LONG( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_long)((addr), (val), &&except)
15.21 #else
15.22 #define INIT_EXCEPTIONS(label)
15.23 -#define MMU_TRANSLATE_READ( addr ) if( (memtmp = mmu_vma_to_phys_read(addr)) == MMU_VMA_ERROR ) { return TRUE; }
15.24 -#define MMU_TRANSLATE_WRITE( addr ) if( (memtmp = mmu_vma_to_phys_write(addr)) == MMU_VMA_ERROR ) { return TRUE; }
15.25 +#define MEM_READ_BYTE( addr, val ) val = ADDRSPACE[(addr)>>12]->read_byte(addr)
15.26 +#define MEM_READ_WORD( addr, val ) val = ADDRSPACE[(addr)>>12]->read_word(addr)
15.27 +#define MEM_READ_LONG( addr, val ) val = ADDRSPACE[(addr)>>12]->read_long(addr)
15.28 +#define MEM_WRITE_BYTE( addr, val ) ADDRSPACE[(addr)>>12]->write_byte(addr, val)
15.29 +#define MEM_WRITE_WORD( addr, val ) ADDRSPACE[(addr)>>12]->write_word(addr, val)
15.30 +#define MEM_WRITE_LONG( addr, val ) ADDRSPACE[(addr)>>12]->write_long(addr, val)
15.31 #endif
15.32
15.33 -#define MEM_READ_BYTE( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_byte(memtmp)
15.34 -#define MEM_READ_WORD( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_word(memtmp)
15.35 -#define MEM_READ_LONG( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_long(memtmp)
15.36 -#define MEM_WRITE_BYTE( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_byte(memtmp, val)
15.37 -#define MEM_WRITE_WORD( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_word(memtmp, val)
15.38 -#define MEM_WRITE_LONG( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_long(memtmp, val)
15.39 +
15.40 +
15.41
15.42
15.43 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
15.44 @@ -188,34 +195,30 @@
15.45 #define MEM_FP_READ( addr, reg ) \
15.46 if( IS_FPU_DOUBLESIZE() ) { \
15.47 CHECKRALIGN64(addr); \
15.48 - MMU_TRANSLATE_READ(addr); \
15.49 if( reg & 1 ) { \
15.50 - *((uint32_t *)&XF((reg) & 0x0E)) = sh4_read_long(memtmp); \
15.51 - *((uint32_t *)&XF(reg)) = sh4_read_long(memtmp+4); \
15.52 + MEM_READ_LONG( addr, *((uint32_t *)&XF((reg) & 0x0E)) ); \
15.53 + MEM_READ_LONG( addr+4, *((uint32_t *)&XF(reg)) ); \
15.54 } else { \
15.55 - *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
15.56 - *((uint32_t *)&FR((reg) | 0x01)) = sh4_read_long(memtmp+4); \
15.57 + MEM_READ_LONG( addr, *((uint32_t *)&FR(reg)) ); \
15.58 + MEM_READ_LONG( addr+4, *((uint32_t *)&FR((reg)|0x01)) ); \
15.59 } \
15.60 } else { \
15.61 CHECKRALIGN32(addr); \
15.62 - MMU_TRANSLATE_READ(addr); \
15.63 - *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
15.64 + MEM_READ_LONG( addr, *((uint32_t *)&FR(reg)) ); \
15.65 }
15.66 #define MEM_FP_WRITE( addr, reg ) \
15.67 if( IS_FPU_DOUBLESIZE() ) { \
15.68 CHECKWALIGN64(addr); \
15.69 - MMU_TRANSLATE_WRITE(addr); \
15.70 if( reg & 1 ) { \
15.71 - sh4_write_long( memtmp, *((uint32_t *)&XF((reg)&0x0E)) ); \
15.72 - sh4_write_long( memtmp+4, *((uint32_t *)&XF(reg)) ); \
15.73 + MEM_WRITE_LONG( addr, *((uint32_t *)&XF((reg)&0x0E)) ); \
15.74 + MEM_WRITE_LONG( addr+4, *((uint32_t *)&XF(reg)) ); \
15.75 } else { \
15.76 - sh4_write_long( memtmp, *((uint32_t *)&FR(reg)) ); \
15.77 - sh4_write_long( memtmp+4, *((uint32_t *)&FR((reg)|0x01)) ); \
15.78 + MEM_WRITE_LONG( addr, *((uint32_t *)&FR(reg)) ); \
15.79 + MEM_WRITE_LONG( addr+4, *((uint32_t *)&FR((reg)|0x01)) ); \
15.80 } \
15.81 } else { \
15.82 CHECKWALIGN32(addr); \
15.83 - MMU_TRANSLATE_WRITE(addr); \
15.84 - sh4_write_long( memtmp, *((uint32_t *)&FR((reg))) ); \
15.85 + MEM_WRITE_LONG(addr, *((uint32_t *)&FR((reg))) ); \
15.86 }
15.87
15.88 gboolean sh4_execute_instruction( void )
16.1 --- a/src/sh4/sh4dasm.in Sat Dec 27 04:09:17 2008 +0000
16.2 +++ b/src/sh4/sh4dasm.in Sat Jan 03 03:30:26 2009 +0000
16.3 @@ -51,7 +51,8 @@
16.4 uint32_t sh4_disasm_instruction( sh4vma_t pc, char *buf, int len, char *opcode )
16.5 {
16.6 sh4addr_t addr = mmu_vma_to_phys_disasm(pc);
16.7 - uint16_t ir = sh4_read_word(addr);
16.8 + uint32_t tmp;
16.9 + uint16_t ir = ext_address_space[addr>>12]->read_word(addr);
16.10
16.11 #define UNDEF(ir) snprintf( buf, len, "???? " );
16.12 #define RN(ir) ((ir&0x0F00)>>8)
16.13 @@ -189,7 +190,10 @@
16.14 MOV.L @Rm+, Rn {: snprintf( buf, len, "MOV.L @R%d+, R%d", Rm, Rn ); :}
16.15 MOV.L @(R0, Rm), Rn {: snprintf( buf, len, "MOV.L @(R0, R%d), R%d", Rm, Rn ); :}
16.16 MOV.L @(disp, GBR), R0 {: snprintf( buf, len, "MOV.L @(%d, GBR), R0",disp ); :}
16.17 -MOV.L @(disp, PC), Rn {: snprintf( buf, len, "MOV.L @($%xh), R%d ; <- #%08x", disp + (pc & 0xFFFFFFFC) + 4, Rn, sh4_read_long(disp+(addr&0xFFFFFFFC)+4) ); :}
16.18 +MOV.L @(disp, PC), Rn {:
16.19 + tmp = mmu_vma_to_phys_disasm(disp + (pc&0xFFFFFFFC) + 4);
16.20 + snprintf( buf, len, "MOV.L @($%xh), R%d ; <- #%08x", disp + (pc&0xFFFFFFFC)+4, Rn, ext_address_space[tmp>>12]->read_long(tmp) );
16.21 +:}
16.22 MOV.L @(disp, Rm), Rn {: snprintf( buf, len, "MOV.L @(%d, R%d), R%d", disp, Rm, Rn ); :}
16.23 MOV.W Rm, @Rn {: snprintf( buf, len, "MOV.W R%d, @R%d", Rm, Rn ); :}
16.24 MOV.W Rm, @-Rn {: snprintf( buf, len, "MOV.W R%d, @-R%d", Rm, Rn ); :}
16.25 @@ -200,7 +204,10 @@
16.26 MOV.W @Rm+, Rn {: snprintf( buf, len, "MOV.W @R%d+, R%d", Rm, Rn ); :}
16.27 MOV.W @(R0, Rm), Rn {: snprintf( buf, len, "MOV.W @(R0, R%d), R%d", Rm, Rn ); :}
16.28 MOV.W @(disp, GBR), R0 {: snprintf( buf, len, "MOV.W @(%d, GBR), R0", disp ); :}
16.29 -MOV.W @(disp, PC), Rn {: snprintf( buf, len, "MOV.W @($%xh), R%d ; <- #%08x", disp + pc + 4, Rn, sh4_read_word(disp+addr+4) ); :}
16.30 +MOV.W @(disp, PC), Rn {:
16.31 + tmp = mmu_vma_to_phys_disasm(disp+pc+4);
16.32 + snprintf( buf, len, "MOV.W @($%xh), R%d ; <- #%08x", disp+pc+4, Rn, ext_address_space[tmp>>12]->read_word(tmp) );
16.33 +:}
16.34 MOV.W @(disp, Rm), R0 {: snprintf( buf, len, "MOV.W @(%d, R%d), R0", disp, Rm ); :}
16.35 MOVA @(disp, PC), R0 {: snprintf( buf, len, "MOVA @($%xh), R0", disp + (pc&0xFFFFFFFC) + 4 ); :}
16.36 MOVCA.L R0, @Rn {: snprintf( buf, len, "MOVCA.L R0, @R%d", Rn ); :}
17.1 --- a/src/sh4/sh4mem.c Sat Dec 27 04:09:17 2008 +0000
17.2 +++ b/src/sh4/sh4mem.c Sat Jan 03 03:30:26 2009 +0000
17.3 @@ -1,6 +1,8 @@
17.4 /**
17.5 * $Id$
17.6 - * sh4mem.c is responsible for interfacing between the SH4's internal memory
17.7 + *
17.8 + * This is a deprecated module that is not yet completely extricated from the
17.9 + * surrounding code.
17.10 *
17.11 * Copyright (c) 2005 Nathan Keynes.
17.12 *
17.13 @@ -28,178 +30,7 @@
17.14 #include "sh4/xltcache.h"
17.15 #include "pvr2/pvr2.h"
17.16
17.17 -/* System regions (probably should be defined elsewhere) */
17.18 -extern struct mem_region_fn mem_region_unmapped;
17.19 -extern struct mem_region_fn mem_region_sdram;
17.20 -extern struct mem_region_fn mem_region_vram32;
17.21 -extern struct mem_region_fn mem_region_vram64;
17.22 -extern struct mem_region_fn mem_region_audioram;
17.23 -extern struct mem_region_fn mem_region_flashram;
17.24 -extern struct mem_region_fn mem_region_bootrom;
17.25 -
17.26 -/* On-chip regions other than defined MMIO regions */
17.27 -extern struct mem_region_fn p4_region_storequeue;
17.28 -extern struct mem_region_fn p4_region_icache_addr;
17.29 -extern struct mem_region_fn p4_region_icache_data;
17.30 -extern struct mem_region_fn p4_region_ocache_addr;
17.31 -extern struct mem_region_fn p4_region_ocache_data;
17.32 -extern struct mem_region_fn p4_region_itlb_addr;
17.33 -extern struct mem_region_fn p4_region_itlb_data;
17.34 -extern struct mem_region_fn p4_region_utlb_addr;
17.35 -extern struct mem_region_fn p4_region_utlb_data;
17.36 -
17.37 -/********************* The main ram address space **********************/
17.38 -static int32_t FASTCALL ext_sdram_read_long( sh4addr_t addr )
17.39 -{
17.40 - return *((int32_t *)(dc_main_ram + (addr&0x00FFFFFF)));
17.41 -}
17.42 -static int32_t FASTCALL ext_sdram_read_word( sh4addr_t addr )
17.43 -{
17.44 - return SIGNEXT16(*((int16_t *)(dc_main_ram + (addr&0x00FFFFFF))));
17.45 -}
17.46 -static int32_t FASTCALL ext_sdram_read_byte( sh4addr_t addr )
17.47 -{
17.48 - return SIGNEXT8(*((int16_t *)(dc_main_ram + (addr&0x00FFFFFF))));
17.49 -}
17.50 -static void FASTCALL ext_sdram_write_long( sh4addr_t addr, uint32_t val )
17.51 -{
17.52 - *(uint32_t *)(dc_main_ram + (addr&0x00FFFFFF)) = val;
17.53 - xlat_invalidate_long(addr);
17.54 -}
17.55 -static void FASTCALL ext_sdram_write_word( sh4addr_t addr, uint32_t val )
17.56 -{
17.57 - *(uint16_t *)(dc_main_ram + (addr&0x00FFFFFF)) = (uint16_t)val;
17.58 - xlat_invalidate_word(addr);
17.59 -}
17.60 -static void FASTCALL ext_sdram_write_byte( sh4addr_t addr, uint32_t val )
17.61 -{
17.62 - *(uint8_t *)(dc_main_ram + (addr&0x00FFFFFF)) = (uint8_t)val;
17.63 - xlat_invalidate_word(addr);
17.64 -}
17.65 -static void FASTCALL ext_sdram_read_burst( unsigned char *dest, sh4addr_t addr )
17.66 -{
17.67 - memcpy( dest, dc_main_ram+(addr&0x00FFFFFF), 32 );
17.68 -}
17.69 -static void FASTCALL ext_sdram_write_burst( sh4addr_t addr, unsigned char *src )
17.70 -{
17.71 - memcpy( dc_main_ram+(addr&0x00FFFFFF), src, 32 );
17.72 -}
17.73 -
17.74 -struct mem_region_fn mem_region_sdram = { ext_sdram_read_long, ext_sdram_write_long,
17.75 - ext_sdram_read_word, ext_sdram_write_word,
17.76 - ext_sdram_read_byte, ext_sdram_write_byte,
17.77 - ext_sdram_read_burst, ext_sdram_write_burst };
17.78 -
17.79 -
17.80 -/***************************** P4 Regions ************************************/
17.81 -
17.82 -/* Store-queue (long-write only?) */
17.83 -static void FASTCALL p4_storequeue_write_long( sh4addr_t addr, uint32_t val )
17.84 -{
17.85 - sh4r.store_queue[(addr>>2)&0xF] = val;
17.86 -}
17.87 -static int32_t FASTCALL p4_storequeue_read_long( sh4addr_t addr )
17.88 -{
17.89 - return sh4r.store_queue[(addr>>2)&0xF];
17.90 -}
17.91 -
17.92 -struct mem_region_fn p4_region_storequeue = {
17.93 - p4_storequeue_read_long, p4_storequeue_write_long,
17.94 - p4_storequeue_read_long, p4_storequeue_write_long,
17.95 - p4_storequeue_read_long, p4_storequeue_write_long,
17.96 - unmapped_read_burst, unmapped_write_burst }; // No burst access.
17.97 -
17.98 -/* TLB access */
17.99 -struct mem_region_fn p4_region_itlb_addr = {
17.100 - mmu_itlb_addr_read, mmu_itlb_addr_write,
17.101 - mmu_itlb_addr_read, mmu_itlb_addr_write,
17.102 - mmu_itlb_addr_read, mmu_itlb_addr_write,
17.103 - unmapped_read_burst, unmapped_write_burst };
17.104 -struct mem_region_fn p4_region_itlb_data = {
17.105 - mmu_itlb_data_read, mmu_itlb_data_write,
17.106 - mmu_itlb_data_read, mmu_itlb_data_write,
17.107 - mmu_itlb_data_read, mmu_itlb_data_write,
17.108 - unmapped_read_burst, unmapped_write_burst };
17.109 -struct mem_region_fn p4_region_utlb_addr = {
17.110 - mmu_utlb_addr_read, mmu_utlb_addr_write,
17.111 - mmu_utlb_addr_read, mmu_utlb_addr_write,
17.112 - mmu_utlb_addr_read, mmu_utlb_addr_write,
17.113 - unmapped_read_burst, unmapped_write_burst };
17.114 -struct mem_region_fn p4_region_utlb_data = {
17.115 - mmu_utlb_data_read, mmu_utlb_data_write,
17.116 - mmu_utlb_data_read, mmu_utlb_data_write,
17.117 - mmu_utlb_data_read, mmu_utlb_data_write,
17.118 - unmapped_read_burst, unmapped_write_burst };
17.119 -
17.120 -/********************** Initialization *************************/
17.121 -
17.122 -mem_region_fn_t *sh4_address_space;
17.123 -
17.124 -static void sh4_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
17.125 -{
17.126 - int count = (end - start) >> 12;
17.127 - mem_region_fn_t *ptr = &sh4_address_space[start>>12];
17.128 - while( count-- > 0 ) {
17.129 - *ptr++ = fn;
17.130 - }
17.131 -}
17.132 -
17.133 -static gboolean sh4_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
17.134 -{
17.135 - int i;
17.136 - for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
17.137 - sh4_address_space[(page|i)>>12] = fn;
17.138 - }
17.139 -}
17.140 -
17.141 -
17.142 -void sh4_mem_init()
17.143 -{
17.144 - int i;
17.145 - mem_region_fn_t *ptr;
17.146 - sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
17.147 - for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
17.148 - memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
17.149 - }
17.150 -
17.151 - /* Setup main P4 regions */
17.152 - sh4_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
17.153 - sh4_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
17.154 - sh4_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
17.155 - sh4_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
17.156 - sh4_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
17.157 - sh4_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
17.158 - sh4_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
17.159 - sh4_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
17.160 - sh4_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
17.161 - sh4_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
17.162 - sh4_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
17.163 -
17.164 - /* Setup P4 control region */
17.165 - sh4_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
17.166 - sh4_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
17.167 - sh4_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
17.168 - sh4_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
17.169 - sh4_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
17.170 - sh4_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
17.171 - sh4_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
17.172 - sh4_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
17.173 - sh4_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
17.174 - sh4_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
17.175 - sh4_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
17.176 - sh4_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
17.177 - sh4_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
17.178 -
17.179 - register_mem_page_remapped_hook( sh4_ext_page_remapped, NULL );
17.180 -}
17.181 -
17.182 -/************** Access methods ***************/
17.183 -#ifdef HAVE_FRAME_ADDRESS
17.184 -#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
17.185 -#else
17.186 -#define RETURN_VIA(exc) return NULL
17.187 -#endif
17.188 -
17.189 +/************** Obsolete methods ***************/
17.190
17.191 int32_t FASTCALL sh4_read_long( sh4addr_t addr )
17.192 {
18.1 --- a/src/sh4/sh4mmio.h Sat Dec 27 04:09:17 2008 +0000
18.2 +++ b/src/sh4/sh4mmio.h Sat Jan 03 03:30:26 2009 +0000
18.3 @@ -203,19 +203,6 @@
18.4 MMIO_REGION( PMM )
18.5 MMIO_REGION_LIST_END
18.6
18.7 -/* mmucr register bits */
18.8 -#define MMUCR_AT 0x00000001 /* Address Translation enabled */
18.9 -#define MMUCR_TI 0x00000004 /* TLB invalidate (always read as 0) */
18.10 -#define MMUCR_SV 0x00000100 /* Single Virtual mode=1 / multiple virtual=0 */
18.11 -#define MMUCR_SQMD 0x00000200 /* Store queue mode bit (0=user, 1=priv only) */
18.12 -#define MMUCR_URC 0x0000FC00 /* UTLB access counter */
18.13 -#define MMUCR_URB 0x00FC0000 /* UTLB entry boundary */
18.14 -#define MMUCR_LRUI 0xFC000000 /* Least recently used ITLB */
18.15 -#define MMUCR_MASK 0xFCFCFF05
18.16 -#define MMUCR_RMASK 0xFCFCFF01 /* Read mask */
18.17 -
18.18 -#define IS_MMU_ENABLED() (MMIO_READ(MMU, MMUCR)&MMUCR_AT)
18.19 -
18.20 /* ccr register bits */
18.21 #define CCR_IIX 0x00008000 /* IC index enable */
18.22 #define CCR_ICI 0x00000800 /* IC invalidation (always read as 0) */
18.23 @@ -243,24 +230,6 @@
18.24 void mmu_set_cache_mode( int );
18.25 void mmu_ldtlb(void);
18.26
18.27 -int32_t FASTCALL mmu_icache_addr_read( sh4addr_t addr );
18.28 -int32_t FASTCALL mmu_icache_data_read( sh4addr_t addr );
18.29 -int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr );
18.30 -int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr );
18.31 -int32_t FASTCALL mmu_ocache_addr_read( sh4addr_t addr );
18.32 -int32_t FASTCALL mmu_ocache_data_read( sh4addr_t addr );
18.33 -int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr );
18.34 -int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr );
18.35 -void FASTCALL mmu_icache_addr_write( sh4addr_t addr, uint32_t val );
18.36 -void FASTCALL mmu_icache_data_write( sh4addr_t addr, uint32_t val );
18.37 -void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val );
18.38 -void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val );
18.39 -void FASTCALL mmu_ocache_addr_write( sh4addr_t addr, uint32_t val );
18.40 -void FASTCALL mmu_ocache_data_write( sh4addr_t addr, uint32_t val );
18.41 -void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val );
18.42 -void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val );
18.43 -
18.44 -
18.45 #ifdef __cplusplus
18.46 }
18.47 #endif
19.1 --- a/src/sh4/sh4x86.in Sat Dec 27 04:09:17 2008 +0000
19.2 +++ b/src/sh4/sh4x86.in Sat Jan 03 03:30:26 2009 +0000
19.3 @@ -32,6 +32,7 @@
19.4 #include "sh4/sh4stat.h"
19.5 #include "sh4/sh4mmio.h"
19.6 #include "sh4/x86op.h"
19.7 +#include "sh4/mmu.h"
19.8 #include "clock.h"
19.9
19.10 #define DEFAULT_BACKPATCH_SIZE 4096
19.11 @@ -177,6 +178,7 @@
19.12 OP32(value);
19.13 }
19.14
19.15 +
19.16 /**
19.17 * Load an immediate 64-bit quantity (note: x86-64 only)
19.18 */
19.19 @@ -287,29 +289,22 @@
19.20 #define UNDEF(ir)
19.21 #define MEM_REGION_PTR(name) offsetof( struct mem_region_fn, name )
19.22 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
19.23 -#define MEM_READ_BYTE( addr_reg, value_reg ) decode_address(addr_reg); call_func1_r32disp8(R_ECX, MEM_REGION_PTR(read_byte), addr_reg ); MEM_RESULT(value_reg)
19.24 -#define MEM_READ_WORD( addr_reg, value_reg ) decode_address(addr_reg); call_func1_r32disp8(R_ECX, MEM_REGION_PTR(read_word), addr_reg ); MEM_RESULT(value_reg)
19.25 -#define MEM_READ_LONG( addr_reg, value_reg ) decode_address(addr_reg); call_func1_r32disp8(R_ECX, MEM_REGION_PTR(read_long), addr_reg ); MEM_RESULT(value_reg)
19.26 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) decode_address(addr_reg); call_func2_r32disp8(R_ECX, MEM_REGION_PTR(write_byte), addr_reg, value_reg)
19.27 -#define MEM_WRITE_WORD( addr_reg, value_reg ) decode_address(addr_reg); call_func2_r32disp8(R_ECX, MEM_REGION_PTR(write_word), addr_reg, value_reg)
19.28 -#define MEM_WRITE_LONG( addr_reg, value_reg ) decode_address(addr_reg); call_func2_r32disp8(R_ECX, MEM_REGION_PTR(write_long), addr_reg, value_reg)
19.29 -
19.30 -#ifdef HAVE_FRAME_ADDRESS
19.31 -/**
19.32 - * Perform MMU translation on the address in addr_reg for a read operation, iff the TLB is turned
19.33 - * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
19.34 +/* Note: For SR.MD == 1 && MMUCR.AT == 0, there are no memory exceptions, so
19.35 + * don't waste the cycles expecting them. Otherwise we need to save the exception pointer.
19.36 */
19.37 -#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1_exc(mmu_vma_to_phys_read, addr_reg, pc); MEM_RESULT(addr_reg); }
19.38 -
19.39 -/**
19.40 - * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
19.41 - * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
19.42 - */
19.43 -#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1_exc(mmu_vma_to_phys_write, addr_reg, pc); MEM_RESULT(addr_reg); }
19.44 -#else
19.45 -#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
19.46 -#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
19.47 -#endif
19.48 +#define _CALL_READ(addr_reg, fn) if( !sh4_x86.tlb_on && (sh4r.xlat_sh4_mode & SR_MD) ) { \
19.49 + call_func1_r32disp8(R_ECX, MEM_REGION_PTR(fn), addr_reg); } else { \
19.50 + call_func1_r32disp8_exc(R_ECX, MEM_REGION_PTR(fn), addr_reg, pc); }
19.51 +#define _CALL_WRITE(addr_reg, val_reg, fn) if( !sh4_x86.tlb_on && (sh4r.xlat_sh4_mode & SR_MD) ) { \
19.52 + call_func2_r32disp8(R_ECX, MEM_REGION_PTR(fn), addr_reg, val_reg); } else { \
19.53 + call_func2_r32disp8_exc(R_ECX, MEM_REGION_PTR(fn), addr_reg, val_reg, pc); }
19.54 +
19.55 +#define MEM_READ_BYTE( addr_reg, value_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, read_byte); MEM_RESULT(value_reg)
19.56 +#define MEM_READ_WORD( addr_reg, value_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, read_word); MEM_RESULT(value_reg)
19.57 +#define MEM_READ_LONG( addr_reg, value_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, read_long); MEM_RESULT(value_reg)
19.58 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_byte)
19.59 +#define MEM_WRITE_WORD( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_word)
19.60 +#define MEM_WRITE_LONG( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_long)
19.61
19.62 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 2;
19.63
19.64 @@ -328,7 +323,7 @@
19.65 sh4_x86.branch_taken = FALSE;
19.66 sh4_x86.backpatch_posn = 0;
19.67 sh4_x86.block_start_pc = pc;
19.68 - sh4_x86.tlb_on = IS_MMU_ENABLED();
19.69 + sh4_x86.tlb_on = IS_TLB_ENABLED();
19.70 sh4_x86.tstate = TSTATE_NONE;
19.71 sh4_x86.double_prec = sh4r.fpscr & FPSCR_PR;
19.72 sh4_x86.double_size = sh4r.fpscr & FPSCR_SZ;
19.73 @@ -421,9 +416,7 @@
19.74 :}
19.75 ADD #imm, Rn {:
19.76 COUNT_INST(I_ADDI);
19.77 - load_reg( R_EAX, Rn );
19.78 - ADD_imm8s_r32( imm, R_EAX );
19.79 - store_reg( R_EAX, Rn );
19.80 + ADD_imm8s_sh4r( imm, REG_OFFSET(r[Rn]) );
19.81 sh4_x86.tstate = TSTATE_NONE;
19.82 :}
19.83 ADDC Rm, Rn {:
19.84 @@ -465,9 +458,7 @@
19.85 AND.B #imm, @(R0, GBR) {:
19.86 COUNT_INST(I_ANDB);
19.87 load_reg( R_EAX, 0 );
19.88 - load_spreg( R_ECX, R_GBR );
19.89 - ADD_r32_r32( R_ECX, R_EAX );
19.90 - MMU_TRANSLATE_WRITE( R_EAX );
19.91 + ADD_sh4r_r32( R_GBR, R_EAX );
19.92 MOV_r32_esp8(R_EAX, 0);
19.93 MEM_READ_BYTE( R_EAX, R_EDX );
19.94 MOV_esp8_r32(0, R_EAX);
19.95 @@ -656,32 +647,25 @@
19.96 if( Rm == Rn ) {
19.97 load_reg( R_EAX, Rm );
19.98 check_ralign32( R_EAX );
19.99 - MMU_TRANSLATE_READ( R_EAX );
19.100 + MEM_READ_LONG( R_EAX, R_EAX );
19.101 MOV_r32_esp8(R_EAX, 0);
19.102 - load_reg( R_EAX, Rn );
19.103 - ADD_imm8s_r32( 4, R_EAX );
19.104 - MMU_TRANSLATE_READ( R_EAX );
19.105 - ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
19.106 - // Note translate twice in case of page boundaries. Maybe worth
19.107 - // adding a page-boundary check to skip the second translation
19.108 + load_reg( R_EAX, Rm );
19.109 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
19.110 + MEM_READ_LONG( R_EAX, R_EAX );
19.111 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
19.112 } else {
19.113 load_reg( R_EAX, Rm );
19.114 check_ralign32( R_EAX );
19.115 - MMU_TRANSLATE_READ( R_EAX );
19.116 + MEM_READ_LONG( R_EAX, R_EAX );
19.117 MOV_r32_esp8( R_EAX, 0 );
19.118 load_reg( R_EAX, Rn );
19.119 check_ralign32( R_EAX );
19.120 - MMU_TRANSLATE_READ( R_EAX );
19.121 + MEM_READ_LONG( R_EAX, R_EAX );
19.122 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
19.123 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.124 }
19.125 - MEM_READ_LONG( R_EAX, R_EAX );
19.126 - MOV_r32_esp8( R_EAX, 4 );
19.127 - MOV_esp8_r32( 0, R_EAX );
19.128 - MEM_READ_LONG( R_EAX, R_EAX );
19.129 - MOV_esp8_r32( 4, R_ECX );
19.130 -
19.131 - IMUL_r32( R_ECX );
19.132 +
19.133 + IMUL_esp8( 0 );
19.134 ADD_r32_sh4r( R_EAX, R_MACL );
19.135 ADC_r32_sh4r( R_EDX, R_MACH );
19.136
19.137 @@ -697,32 +681,26 @@
19.138 if( Rm == Rn ) {
19.139 load_reg( R_EAX, Rm );
19.140 check_ralign16( R_EAX );
19.141 - MMU_TRANSLATE_READ( R_EAX );
19.142 + MEM_READ_WORD( R_EAX, R_EAX );
19.143 MOV_r32_esp8( R_EAX, 0 );
19.144 - load_reg( R_EAX, Rn );
19.145 - ADD_imm8s_r32( 2, R_EAX );
19.146 - MMU_TRANSLATE_READ( R_EAX );
19.147 + load_reg( R_EAX, Rm );
19.148 + LEA_r32disp8_r32( R_EAX, 2, R_EAX );
19.149 + MEM_READ_WORD( R_EAX, R_EAX );
19.150 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
19.151 // Note translate twice in case of page boundaries. Maybe worth
19.152 // adding a page-boundary check to skip the second translation
19.153 } else {
19.154 load_reg( R_EAX, Rm );
19.155 check_ralign16( R_EAX );
19.156 - MMU_TRANSLATE_READ( R_EAX );
19.157 + MEM_READ_WORD( R_EAX, R_EAX );
19.158 MOV_r32_esp8( R_EAX, 0 );
19.159 load_reg( R_EAX, Rn );
19.160 check_ralign16( R_EAX );
19.161 - MMU_TRANSLATE_READ( R_EAX );
19.162 + MEM_READ_WORD( R_EAX, R_EAX );
19.163 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
19.164 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
19.165 }
19.166 - MEM_READ_WORD( R_EAX, R_EAX );
19.167 - MOV_r32_esp8( R_EAX, 4 );
19.168 - MOV_esp8_r32( 0, R_EAX );
19.169 - MEM_READ_WORD( R_EAX, R_EAX );
19.170 - MOV_esp8_r32( 4, R_ECX );
19.171 -
19.172 - IMUL_r32( R_ECX );
19.173 + IMUL_esp8( 0 );
19.174 load_spreg( R_ECX, R_S );
19.175 TEST_r32_r32( R_ECX, R_ECX );
19.176 JE_rel8( nosat );
19.177 @@ -820,9 +798,7 @@
19.178 OR.B #imm, @(R0, GBR) {:
19.179 COUNT_INST(I_ORB);
19.180 load_reg( R_EAX, 0 );
19.181 - load_spreg( R_ECX, R_GBR );
19.182 - ADD_r32_r32( R_ECX, R_EAX );
19.183 - MMU_TRANSLATE_WRITE( R_EAX );
19.184 + ADD_sh4r_r32( R_GBR, R_EAX );
19.185 MOV_r32_esp8( R_EAX, 0 );
19.186 MEM_READ_BYTE( R_EAX, R_EDX );
19.187 MOV_esp8_r32( 0, R_EAX );
19.188 @@ -1041,7 +1017,6 @@
19.189 TAS.B @Rn {:
19.190 COUNT_INST(I_TASB);
19.191 load_reg( R_EAX, Rn );
19.192 - MMU_TRANSLATE_WRITE( R_EAX );
19.193 MOV_r32_esp8( R_EAX, 0 );
19.194 MEM_READ_BYTE( R_EAX, R_EDX );
19.195 TEST_r8_r8( R_DL, R_DL );
19.196 @@ -1069,9 +1044,7 @@
19.197 TST.B #imm, @(R0, GBR) {:
19.198 COUNT_INST(I_TSTB);
19.199 load_reg( R_EAX, 0);
19.200 - load_reg( R_ECX, R_GBR);
19.201 - ADD_r32_r32( R_ECX, R_EAX );
19.202 - MMU_TRANSLATE_READ( R_EAX );
19.203 + ADD_sh4r_r32( R_GBR, R_EAX );
19.204 MEM_READ_BYTE( R_EAX, R_EAX );
19.205 TEST_imm8_r8( imm, R_AL );
19.206 SETE_t();
19.207 @@ -1095,9 +1068,7 @@
19.208 XOR.B #imm, @(R0, GBR) {:
19.209 COUNT_INST(I_XORB);
19.210 load_reg( R_EAX, 0 );
19.211 - load_spreg( R_ECX, R_GBR );
19.212 - ADD_r32_r32( R_ECX, R_EAX );
19.213 - MMU_TRANSLATE_WRITE( R_EAX );
19.214 + ADD_sh4r_r32( R_GBR, R_EAX );
19.215 MOV_r32_esp8( R_EAX, 0 );
19.216 MEM_READ_BYTE(R_EAX, R_EDX);
19.217 MOV_esp8_r32( 0, R_EAX );
19.218 @@ -1130,7 +1101,6 @@
19.219 MOV.B Rm, @Rn {:
19.220 COUNT_INST(I_MOVB);
19.221 load_reg( R_EAX, Rn );
19.222 - MMU_TRANSLATE_WRITE( R_EAX );
19.223 load_reg( R_EDX, Rm );
19.224 MEM_WRITE_BYTE( R_EAX, R_EDX );
19.225 sh4_x86.tstate = TSTATE_NONE;
19.226 @@ -1138,19 +1108,16 @@
19.227 MOV.B Rm, @-Rn {:
19.228 COUNT_INST(I_MOVB);
19.229 load_reg( R_EAX, Rn );
19.230 - ADD_imm8s_r32( -1, R_EAX );
19.231 - MMU_TRANSLATE_WRITE( R_EAX );
19.232 + LEA_r32disp8_r32( R_EAX, -1, R_EAX );
19.233 load_reg( R_EDX, Rm );
19.234 + MEM_WRITE_BYTE( R_EAX, R_EDX );
19.235 ADD_imm8s_sh4r( -1, REG_OFFSET(r[Rn]) );
19.236 - MEM_WRITE_BYTE( R_EAX, R_EDX );
19.237 sh4_x86.tstate = TSTATE_NONE;
19.238 :}
19.239 MOV.B Rm, @(R0, Rn) {:
19.240 COUNT_INST(I_MOVB);
19.241 load_reg( R_EAX, 0 );
19.242 - load_reg( R_ECX, Rn );
19.243 - ADD_r32_r32( R_ECX, R_EAX );
19.244 - MMU_TRANSLATE_WRITE( R_EAX );
19.245 + ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
19.246 load_reg( R_EDX, Rm );
19.247 MEM_WRITE_BYTE( R_EAX, R_EDX );
19.248 sh4_x86.tstate = TSTATE_NONE;
19.249 @@ -1159,7 +1126,6 @@
19.250 COUNT_INST(I_MOVB);
19.251 load_spreg( R_EAX, R_GBR );
19.252 ADD_imm32_r32( disp, R_EAX );
19.253 - MMU_TRANSLATE_WRITE( R_EAX );
19.254 load_reg( R_EDX, 0 );
19.255 MEM_WRITE_BYTE( R_EAX, R_EDX );
19.256 sh4_x86.tstate = TSTATE_NONE;
19.257 @@ -1168,7 +1134,6 @@
19.258 COUNT_INST(I_MOVB);
19.259 load_reg( R_EAX, Rn );
19.260 ADD_imm32_r32( disp, R_EAX );
19.261 - MMU_TRANSLATE_WRITE( R_EAX );
19.262 load_reg( R_EDX, 0 );
19.263 MEM_WRITE_BYTE( R_EAX, R_EDX );
19.264 sh4_x86.tstate = TSTATE_NONE;
19.265 @@ -1176,7 +1141,6 @@
19.266 MOV.B @Rm, Rn {:
19.267 COUNT_INST(I_MOVB);
19.268 load_reg( R_EAX, Rm );
19.269 - MMU_TRANSLATE_READ( R_EAX );
19.270 MEM_READ_BYTE( R_EAX, R_EAX );
19.271 store_reg( R_EAX, Rn );
19.272 sh4_x86.tstate = TSTATE_NONE;
19.273 @@ -1184,18 +1148,17 @@
19.274 MOV.B @Rm+, Rn {:
19.275 COUNT_INST(I_MOVB);
19.276 load_reg( R_EAX, Rm );
19.277 - MMU_TRANSLATE_READ( R_EAX );
19.278 - ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
19.279 MEM_READ_BYTE( R_EAX, R_EAX );
19.280 + if( Rm != Rn ) {
19.281 + ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
19.282 + }
19.283 store_reg( R_EAX, Rn );
19.284 sh4_x86.tstate = TSTATE_NONE;
19.285 :}
19.286 MOV.B @(R0, Rm), Rn {:
19.287 COUNT_INST(I_MOVB);
19.288 load_reg( R_EAX, 0 );
19.289 - load_reg( R_ECX, Rm );
19.290 - ADD_r32_r32( R_ECX, R_EAX );
19.291 - MMU_TRANSLATE_READ( R_EAX )
19.292 + ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
19.293 MEM_READ_BYTE( R_EAX, R_EAX );
19.294 store_reg( R_EAX, Rn );
19.295 sh4_x86.tstate = TSTATE_NONE;
19.296 @@ -1204,7 +1167,6 @@
19.297 COUNT_INST(I_MOVB);
19.298 load_spreg( R_EAX, R_GBR );
19.299 ADD_imm32_r32( disp, R_EAX );
19.300 - MMU_TRANSLATE_READ( R_EAX );
19.301 MEM_READ_BYTE( R_EAX, R_EAX );
19.302 store_reg( R_EAX, 0 );
19.303 sh4_x86.tstate = TSTATE_NONE;
19.304 @@ -1213,7 +1175,6 @@
19.305 COUNT_INST(I_MOVB);
19.306 load_reg( R_EAX, Rm );
19.307 ADD_imm32_r32( disp, R_EAX );
19.308 - MMU_TRANSLATE_READ( R_EAX );
19.309 MEM_READ_BYTE( R_EAX, R_EAX );
19.310 store_reg( R_EAX, 0 );
19.311 sh4_x86.tstate = TSTATE_NONE;
19.312 @@ -1231,7 +1192,6 @@
19.313 MOV_r32_ebpr32disp32( R_EDX, R_EAX, REG_OFFSET(store_queue) );
19.314 JMP_rel8(end);
19.315 JMP_TARGET(notsq);
19.316 - MMU_TRANSLATE_WRITE( R_EAX );
19.317 load_reg( R_EDX, Rm );
19.318 MEM_WRITE_LONG( R_EAX, R_EDX );
19.319 JMP_TARGET(end);
19.320 @@ -1242,19 +1202,16 @@
19.321 load_reg( R_EAX, Rn );
19.322 ADD_imm8s_r32( -4, R_EAX );
19.323 check_walign32( R_EAX );
19.324 - MMU_TRANSLATE_WRITE( R_EAX );
19.325 load_reg( R_EDX, Rm );
19.326 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.327 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.328 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.329 sh4_x86.tstate = TSTATE_NONE;
19.330 :}
19.331 MOV.L Rm, @(R0, Rn) {:
19.332 COUNT_INST(I_MOVL);
19.333 load_reg( R_EAX, 0 );
19.334 - load_reg( R_ECX, Rn );
19.335 - ADD_r32_r32( R_ECX, R_EAX );
19.336 + ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
19.337 check_walign32( R_EAX );
19.338 - MMU_TRANSLATE_WRITE( R_EAX );
19.339 load_reg( R_EDX, Rm );
19.340 MEM_WRITE_LONG( R_EAX, R_EDX );
19.341 sh4_x86.tstate = TSTATE_NONE;
19.342 @@ -1264,7 +1221,6 @@
19.343 load_spreg( R_EAX, R_GBR );
19.344 ADD_imm32_r32( disp, R_EAX );
19.345 check_walign32( R_EAX );
19.346 - MMU_TRANSLATE_WRITE( R_EAX );
19.347 load_reg( R_EDX, 0 );
19.348 MEM_WRITE_LONG( R_EAX, R_EDX );
19.349 sh4_x86.tstate = TSTATE_NONE;
19.350 @@ -1283,7 +1239,6 @@
19.351 MOV_r32_ebpr32disp32( R_EDX, R_EAX, REG_OFFSET(store_queue) );
19.352 JMP_rel8(end);
19.353 JMP_TARGET(notsq);
19.354 - MMU_TRANSLATE_WRITE( R_EAX );
19.355 load_reg( R_EDX, Rm );
19.356 MEM_WRITE_LONG( R_EAX, R_EDX );
19.357 JMP_TARGET(end);
19.358 @@ -1293,7 +1248,6 @@
19.359 COUNT_INST(I_MOVL);
19.360 load_reg( R_EAX, Rm );
19.361 check_ralign32( R_EAX );
19.362 - MMU_TRANSLATE_READ( R_EAX );
19.363 MEM_READ_LONG( R_EAX, R_EAX );
19.364 store_reg( R_EAX, Rn );
19.365 sh4_x86.tstate = TSTATE_NONE;
19.366 @@ -1302,19 +1256,18 @@
19.367 COUNT_INST(I_MOVL);
19.368 load_reg( R_EAX, Rm );
19.369 check_ralign32( R_EAX );
19.370 - MMU_TRANSLATE_READ( R_EAX );
19.371 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.372 MEM_READ_LONG( R_EAX, R_EAX );
19.373 + if( Rm != Rn ) {
19.374 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.375 + }
19.376 store_reg( R_EAX, Rn );
19.377 sh4_x86.tstate = TSTATE_NONE;
19.378 :}
19.379 MOV.L @(R0, Rm), Rn {:
19.380 COUNT_INST(I_MOVL);
19.381 load_reg( R_EAX, 0 );
19.382 - load_reg( R_ECX, Rm );
19.383 - ADD_r32_r32( R_ECX, R_EAX );
19.384 + ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
19.385 check_ralign32( R_EAX );
19.386 - MMU_TRANSLATE_READ( R_EAX );
19.387 MEM_READ_LONG( R_EAX, R_EAX );
19.388 store_reg( R_EAX, Rn );
19.389 sh4_x86.tstate = TSTATE_NONE;
19.390 @@ -1324,7 +1277,6 @@
19.391 load_spreg( R_EAX, R_GBR );
19.392 ADD_imm32_r32( disp, R_EAX );
19.393 check_ralign32( R_EAX );
19.394 - MMU_TRANSLATE_READ( R_EAX );
19.395 MEM_READ_LONG( R_EAX, R_EAX );
19.396 store_reg( R_EAX, 0 );
19.397 sh4_x86.tstate = TSTATE_NONE;
19.398 @@ -1353,7 +1305,6 @@
19.399 // but we can safely assume that the low bits are the same.
19.400 load_imm32( R_EAX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
19.401 ADD_sh4r_r32( R_PC, R_EAX );
19.402 - MMU_TRANSLATE_READ( R_EAX );
19.403 MEM_READ_LONG( R_EAX, R_EAX );
19.404 sh4_x86.tstate = TSTATE_NONE;
19.405 }
19.406 @@ -1365,7 +1316,6 @@
19.407 load_reg( R_EAX, Rm );
19.408 ADD_imm8s_r32( disp, R_EAX );
19.409 check_ralign32( R_EAX );
19.410 - MMU_TRANSLATE_READ( R_EAX );
19.411 MEM_READ_LONG( R_EAX, R_EAX );
19.412 store_reg( R_EAX, Rn );
19.413 sh4_x86.tstate = TSTATE_NONE;
19.414 @@ -1374,7 +1324,6 @@
19.415 COUNT_INST(I_MOVW);
19.416 load_reg( R_EAX, Rn );
19.417 check_walign16( R_EAX );
19.418 - MMU_TRANSLATE_WRITE( R_EAX )
19.419 load_reg( R_EDX, Rm );
19.420 MEM_WRITE_WORD( R_EAX, R_EDX );
19.421 sh4_x86.tstate = TSTATE_NONE;
19.422 @@ -1382,21 +1331,18 @@
19.423 MOV.W Rm, @-Rn {:
19.424 COUNT_INST(I_MOVW);
19.425 load_reg( R_EAX, Rn );
19.426 - ADD_imm8s_r32( -2, R_EAX );
19.427 check_walign16( R_EAX );
19.428 - MMU_TRANSLATE_WRITE( R_EAX );
19.429 + LEA_r32disp8_r32( R_EAX, -2, R_EAX );
19.430 load_reg( R_EDX, Rm );
19.431 + MEM_WRITE_WORD( R_EAX, R_EDX );
19.432 ADD_imm8s_sh4r( -2, REG_OFFSET(r[Rn]) );
19.433 - MEM_WRITE_WORD( R_EAX, R_EDX );
19.434 sh4_x86.tstate = TSTATE_NONE;
19.435 :}
19.436 MOV.W Rm, @(R0, Rn) {:
19.437 COUNT_INST(I_MOVW);
19.438 load_reg( R_EAX, 0 );
19.439 - load_reg( R_ECX, Rn );
19.440 - ADD_r32_r32( R_ECX, R_EAX );
19.441 + ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
19.442 check_walign16( R_EAX );
19.443 - MMU_TRANSLATE_WRITE( R_EAX );
19.444 load_reg( R_EDX, Rm );
19.445 MEM_WRITE_WORD( R_EAX, R_EDX );
19.446 sh4_x86.tstate = TSTATE_NONE;
19.447 @@ -1406,7 +1352,6 @@
19.448 load_spreg( R_EAX, R_GBR );
19.449 ADD_imm32_r32( disp, R_EAX );
19.450 check_walign16( R_EAX );
19.451 - MMU_TRANSLATE_WRITE( R_EAX );
19.452 load_reg( R_EDX, 0 );
19.453 MEM_WRITE_WORD( R_EAX, R_EDX );
19.454 sh4_x86.tstate = TSTATE_NONE;
19.455 @@ -1416,7 +1361,6 @@
19.456 load_reg( R_EAX, Rn );
19.457 ADD_imm32_r32( disp, R_EAX );
19.458 check_walign16( R_EAX );
19.459 - MMU_TRANSLATE_WRITE( R_EAX );
19.460 load_reg( R_EDX, 0 );
19.461 MEM_WRITE_WORD( R_EAX, R_EDX );
19.462 sh4_x86.tstate = TSTATE_NONE;
19.463 @@ -1425,7 +1369,6 @@
19.464 COUNT_INST(I_MOVW);
19.465 load_reg( R_EAX, Rm );
19.466 check_ralign16( R_EAX );
19.467 - MMU_TRANSLATE_READ( R_EAX );
19.468 MEM_READ_WORD( R_EAX, R_EAX );
19.469 store_reg( R_EAX, Rn );
19.470 sh4_x86.tstate = TSTATE_NONE;
19.471 @@ -1434,19 +1377,18 @@
19.472 COUNT_INST(I_MOVW);
19.473 load_reg( R_EAX, Rm );
19.474 check_ralign16( R_EAX );
19.475 - MMU_TRANSLATE_READ( R_EAX );
19.476 - ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
19.477 MEM_READ_WORD( R_EAX, R_EAX );
19.478 + if( Rm != Rn ) {
19.479 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
19.480 + }
19.481 store_reg( R_EAX, Rn );
19.482 sh4_x86.tstate = TSTATE_NONE;
19.483 :}
19.484 MOV.W @(R0, Rm), Rn {:
19.485 COUNT_INST(I_MOVW);
19.486 load_reg( R_EAX, 0 );
19.487 - load_reg( R_ECX, Rm );
19.488 - ADD_r32_r32( R_ECX, R_EAX );
19.489 + ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
19.490 check_ralign16( R_EAX );
19.491 - MMU_TRANSLATE_READ( R_EAX );
19.492 MEM_READ_WORD( R_EAX, R_EAX );
19.493 store_reg( R_EAX, Rn );
19.494 sh4_x86.tstate = TSTATE_NONE;
19.495 @@ -1456,7 +1398,6 @@
19.496 load_spreg( R_EAX, R_GBR );
19.497 ADD_imm32_r32( disp, R_EAX );
19.498 check_ralign16( R_EAX );
19.499 - MMU_TRANSLATE_READ( R_EAX );
19.500 MEM_READ_WORD( R_EAX, R_EAX );
19.501 store_reg( R_EAX, 0 );
19.502 sh4_x86.tstate = TSTATE_NONE;
19.503 @@ -1475,7 +1416,6 @@
19.504 } else {
19.505 load_imm32( R_EAX, (pc - sh4_x86.block_start_pc) + disp + 4 );
19.506 ADD_sh4r_r32( R_PC, R_EAX );
19.507 - MMU_TRANSLATE_READ( R_EAX );
19.508 MEM_READ_WORD( R_EAX, R_EAX );
19.509 sh4_x86.tstate = TSTATE_NONE;
19.510 }
19.511 @@ -1487,7 +1427,6 @@
19.512 load_reg( R_EAX, Rm );
19.513 ADD_imm32_r32( disp, R_EAX );
19.514 check_ralign16( R_EAX );
19.515 - MMU_TRANSLATE_READ( R_EAX );
19.516 MEM_READ_WORD( R_EAX, R_EAX );
19.517 store_reg( R_EAX, 0 );
19.518 sh4_x86.tstate = TSTATE_NONE;
19.519 @@ -1507,7 +1446,6 @@
19.520 COUNT_INST(I_MOVCA);
19.521 load_reg( R_EAX, Rn );
19.522 check_walign32( R_EAX );
19.523 - MMU_TRANSLATE_WRITE( R_EAX );
19.524 load_reg( R_EDX, 0 );
19.525 MEM_WRITE_LONG( R_EAX, R_EDX );
19.526 sh4_x86.tstate = TSTATE_NONE;
19.527 @@ -1857,13 +1795,14 @@
19.528 load_reg( R_EAX, Rn );
19.529 if( sh4_x86.double_size ) {
19.530 check_walign64( R_EAX );
19.531 - MMU_TRANSLATE_WRITE( R_EAX );
19.532 load_dr0( R_EDX, FRm );
19.533 - load_dr1( R_ECX, FRm );
19.534 - MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
19.535 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.536 + load_reg( R_EAX, Rn );
19.537 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
19.538 + load_dr1( R_EDX, FRm );
19.539 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.540 } else {
19.541 check_walign32( R_EAX );
19.542 - MMU_TRANSLATE_WRITE( R_EAX );
19.543 load_fr( R_EDX, FRm );
19.544 MEM_WRITE_LONG( R_EAX, R_EDX );
19.545 }
19.546 @@ -1875,13 +1814,14 @@
19.547 load_reg( R_EAX, Rm );
19.548 if( sh4_x86.double_size ) {
19.549 check_ralign64( R_EAX );
19.550 - MMU_TRANSLATE_READ( R_EAX );
19.551 - MEM_READ_DOUBLE( R_EAX, R_EDX, R_EAX );
19.552 - store_dr0( R_EDX, FRn );
19.553 - store_dr1( R_EAX, FRn );
19.554 + MEM_READ_LONG( R_EAX, R_EAX );
19.555 + store_dr0( R_EAX, FRn );
19.556 + load_reg( R_EAX, Rm );
19.557 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
19.558 + MEM_READ_LONG( R_EAX, R_EAX );
19.559 + store_dr1( R_EAX, FRn );
19.560 } else {
19.561 check_ralign32( R_EAX );
19.562 - MMU_TRANSLATE_READ( R_EAX );
19.563 MEM_READ_LONG( R_EAX, R_EAX );
19.564 store_fr( R_EAX, FRn );
19.565 }
19.566 @@ -1893,19 +1833,20 @@
19.567 load_reg( R_EAX, Rn );
19.568 if( sh4_x86.double_size ) {
19.569 check_walign64( R_EAX );
19.570 - ADD_imm8s_r32(-8,R_EAX);
19.571 - MMU_TRANSLATE_WRITE( R_EAX );
19.572 + LEA_r32disp8_r32( R_EAX, -8, R_EAX );
19.573 load_dr0( R_EDX, FRm );
19.574 - load_dr1( R_ECX, FRm );
19.575 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.576 + load_reg( R_EAX, Rn );
19.577 + LEA_r32disp8_r32( R_EAX, -4, R_EAX );
19.578 + load_dr1( R_EDX, FRm );
19.579 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.580 ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
19.581 - MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
19.582 } else {
19.583 check_walign32( R_EAX );
19.584 - ADD_imm8s_r32( -4, R_EAX );
19.585 - MMU_TRANSLATE_WRITE( R_EAX );
19.586 + LEA_r32disp8_r32( R_EAX, -4, R_EAX );
19.587 load_fr( R_EDX, FRm );
19.588 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.589 ADD_imm8s_sh4r(-4,REG_OFFSET(r[Rn]));
19.590 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.591 }
19.592 sh4_x86.tstate = TSTATE_NONE;
19.593 :}
19.594 @@ -1915,17 +1856,18 @@
19.595 load_reg( R_EAX, Rm );
19.596 if( sh4_x86.double_size ) {
19.597 check_ralign64( R_EAX );
19.598 - MMU_TRANSLATE_READ( R_EAX );
19.599 + MEM_READ_LONG( R_EAX, R_EAX );
19.600 + store_dr0( R_EAX, FRn );
19.601 + load_reg( R_EAX, Rm );
19.602 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
19.603 + MEM_READ_LONG( R_EAX, R_EAX );
19.604 + store_dr1( R_EAX, FRn );
19.605 ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
19.606 - MEM_READ_DOUBLE( R_EAX, R_EDX, R_EAX );
19.607 - store_dr0( R_EDX, FRn );
19.608 - store_dr1( R_EAX, FRn );
19.609 } else {
19.610 check_ralign32( R_EAX );
19.611 - MMU_TRANSLATE_READ( R_EAX );
19.612 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.613 MEM_READ_LONG( R_EAX, R_EAX );
19.614 store_fr( R_EAX, FRn );
19.615 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.616 }
19.617 sh4_x86.tstate = TSTATE_NONE;
19.618 :}
19.619 @@ -1936,13 +1878,15 @@
19.620 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
19.621 if( sh4_x86.double_size ) {
19.622 check_walign64( R_EAX );
19.623 - MMU_TRANSLATE_WRITE( R_EAX );
19.624 load_dr0( R_EDX, FRm );
19.625 - load_dr1( R_ECX, FRm );
19.626 - MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
19.627 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.628 + load_reg( R_EAX, Rn );
19.629 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
19.630 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
19.631 + load_dr1( R_EDX, FRm );
19.632 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.633 } else {
19.634 check_walign32( R_EAX );
19.635 - MMU_TRANSLATE_WRITE( R_EAX );
19.636 load_fr( R_EDX, FRm );
19.637 MEM_WRITE_LONG( R_EAX, R_EDX ); // 12
19.638 }
19.639 @@ -1955,13 +1899,15 @@
19.640 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
19.641 if( sh4_x86.double_size ) {
19.642 check_ralign64( R_EAX );
19.643 - MMU_TRANSLATE_READ( R_EAX );
19.644 - MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
19.645 - store_dr0( R_ECX, FRn );
19.646 + MEM_READ_LONG( R_EAX, R_EAX );
19.647 + store_dr0( R_EAX, FRn );
19.648 + load_reg( R_EAX, Rm );
19.649 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
19.650 + LEA_r32disp8_r32( R_EAX, 4, R_EAX );
19.651 + MEM_READ_LONG( R_EAX, R_EAX );
19.652 store_dr1( R_EAX, FRn );
19.653 } else {
19.654 check_ralign32( R_EAX );
19.655 - MMU_TRANSLATE_READ( R_EAX );
19.656 MEM_READ_LONG( R_EAX, R_EAX );
19.657 store_fr( R_EAX, FRn );
19.658 }
19.659 @@ -2374,9 +2320,8 @@
19.660 COUNT_INST(I_LDCM);
19.661 load_reg( R_EAX, Rm );
19.662 check_ralign32( R_EAX );
19.663 - MMU_TRANSLATE_READ( R_EAX );
19.664 + MEM_READ_LONG( R_EAX, R_EAX );
19.665 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.666 - MEM_READ_LONG( R_EAX, R_EAX );
19.667 store_spreg( R_EAX, R_GBR );
19.668 sh4_x86.tstate = TSTATE_NONE;
19.669 :}
19.670 @@ -2388,9 +2333,8 @@
19.671 check_priv();
19.672 load_reg( R_EAX, Rm );
19.673 check_ralign32( R_EAX );
19.674 - MMU_TRANSLATE_READ( R_EAX );
19.675 + MEM_READ_LONG( R_EAX, R_EAX );
19.676 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.677 - MEM_READ_LONG( R_EAX, R_EAX );
19.678 call_func1( sh4_write_sr, R_EAX );
19.679 sh4_x86.fpuen_checked = FALSE;
19.680 sh4_x86.tstate = TSTATE_NONE;
19.681 @@ -2402,9 +2346,8 @@
19.682 check_priv();
19.683 load_reg( R_EAX, Rm );
19.684 check_ralign32( R_EAX );
19.685 - MMU_TRANSLATE_READ( R_EAX );
19.686 + MEM_READ_LONG( R_EAX, R_EAX );
19.687 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.688 - MEM_READ_LONG( R_EAX, R_EAX );
19.689 store_spreg( R_EAX, R_VBR );
19.690 sh4_x86.tstate = TSTATE_NONE;
19.691 :}
19.692 @@ -2413,9 +2356,8 @@
19.693 check_priv();
19.694 load_reg( R_EAX, Rm );
19.695 check_ralign32( R_EAX );
19.696 - MMU_TRANSLATE_READ( R_EAX );
19.697 + MEM_READ_LONG( R_EAX, R_EAX );
19.698 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.699 - MEM_READ_LONG( R_EAX, R_EAX );
19.700 store_spreg( R_EAX, R_SSR );
19.701 sh4_x86.tstate = TSTATE_NONE;
19.702 :}
19.703 @@ -2424,9 +2366,8 @@
19.704 check_priv();
19.705 load_reg( R_EAX, Rm );
19.706 check_ralign32( R_EAX );
19.707 - MMU_TRANSLATE_READ( R_EAX );
19.708 + MEM_READ_LONG( R_EAX, R_EAX );
19.709 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.710 - MEM_READ_LONG( R_EAX, R_EAX );
19.711 store_spreg( R_EAX, R_SGR );
19.712 sh4_x86.tstate = TSTATE_NONE;
19.713 :}
19.714 @@ -2435,9 +2376,8 @@
19.715 check_priv();
19.716 load_reg( R_EAX, Rm );
19.717 check_ralign32( R_EAX );
19.718 - MMU_TRANSLATE_READ( R_EAX );
19.719 + MEM_READ_LONG( R_EAX, R_EAX );
19.720 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.721 - MEM_READ_LONG( R_EAX, R_EAX );
19.722 store_spreg( R_EAX, R_SPC );
19.723 sh4_x86.tstate = TSTATE_NONE;
19.724 :}
19.725 @@ -2446,9 +2386,8 @@
19.726 check_priv();
19.727 load_reg( R_EAX, Rm );
19.728 check_ralign32( R_EAX );
19.729 - MMU_TRANSLATE_READ( R_EAX );
19.730 + MEM_READ_LONG( R_EAX, R_EAX );
19.731 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.732 - MEM_READ_LONG( R_EAX, R_EAX );
19.733 store_spreg( R_EAX, R_DBR );
19.734 sh4_x86.tstate = TSTATE_NONE;
19.735 :}
19.736 @@ -2457,9 +2396,8 @@
19.737 check_priv();
19.738 load_reg( R_EAX, Rm );
19.739 check_ralign32( R_EAX );
19.740 - MMU_TRANSLATE_READ( R_EAX );
19.741 + MEM_READ_LONG( R_EAX, R_EAX );
19.742 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.743 - MEM_READ_LONG( R_EAX, R_EAX );
19.744 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
19.745 sh4_x86.tstate = TSTATE_NONE;
19.746 :}
19.747 @@ -2476,9 +2414,8 @@
19.748 check_fpuen();
19.749 load_reg( R_EAX, Rm );
19.750 check_ralign32( R_EAX );
19.751 - MMU_TRANSLATE_READ( R_EAX );
19.752 + MEM_READ_LONG( R_EAX, R_EAX );
19.753 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.754 - MEM_READ_LONG( R_EAX, R_EAX );
19.755 call_func1( sh4_write_fpscr, R_EAX );
19.756 sh4_x86.tstate = TSTATE_NONE;
19.757 return 2;
19.758 @@ -2494,9 +2431,8 @@
19.759 check_fpuen();
19.760 load_reg( R_EAX, Rm );
19.761 check_ralign32( R_EAX );
19.762 - MMU_TRANSLATE_READ( R_EAX );
19.763 + MEM_READ_LONG( R_EAX, R_EAX );
19.764 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.765 - MEM_READ_LONG( R_EAX, R_EAX );
19.766 store_spreg( R_EAX, R_FPUL );
19.767 sh4_x86.tstate = TSTATE_NONE;
19.768 :}
19.769 @@ -2509,9 +2445,8 @@
19.770 COUNT_INST(I_LDSM);
19.771 load_reg( R_EAX, Rm );
19.772 check_ralign32( R_EAX );
19.773 - MMU_TRANSLATE_READ( R_EAX );
19.774 + MEM_READ_LONG( R_EAX, R_EAX );
19.775 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.776 - MEM_READ_LONG( R_EAX, R_EAX );
19.777 store_spreg( R_EAX, R_MACH );
19.778 sh4_x86.tstate = TSTATE_NONE;
19.779 :}
19.780 @@ -2524,9 +2459,8 @@
19.781 COUNT_INST(I_LDSM);
19.782 load_reg( R_EAX, Rm );
19.783 check_ralign32( R_EAX );
19.784 - MMU_TRANSLATE_READ( R_EAX );
19.785 + MEM_READ_LONG( R_EAX, R_EAX );
19.786 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.787 - MEM_READ_LONG( R_EAX, R_EAX );
19.788 store_spreg( R_EAX, R_MACL );
19.789 sh4_x86.tstate = TSTATE_NONE;
19.790 :}
19.791 @@ -2539,9 +2473,8 @@
19.792 COUNT_INST(I_LDSM);
19.793 load_reg( R_EAX, Rm );
19.794 check_ralign32( R_EAX );
19.795 - MMU_TRANSLATE_READ( R_EAX );
19.796 + MEM_READ_LONG( R_EAX, R_EAX );
19.797 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
19.798 - MEM_READ_LONG( R_EAX, R_EAX );
19.799 store_spreg( R_EAX, R_PR );
19.800 sh4_x86.tstate = TSTATE_NONE;
19.801 :}
19.802 @@ -2641,16 +2574,13 @@
19.803 STC.L SR, @-Rn {:
19.804 COUNT_INST(I_STCSRM);
19.805 check_priv();
19.806 + call_func0( sh4_read_sr );
19.807 + MOV_r32_r32( R_EAX, R_EDX );
19.808 load_reg( R_EAX, Rn );
19.809 check_walign32( R_EAX );
19.810 - ADD_imm8s_r32( -4, R_EAX );
19.811 - MMU_TRANSLATE_WRITE( R_EAX );
19.812 - MOV_r32_esp8( R_EAX, 0 );
19.813 - call_func0( sh4_read_sr );
19.814 - MOV_r32_r32( R_EAX, R_EDX );
19.815 - MOV_esp8_r32( 0, R_EAX );
19.816 + LEA_r32disp8_r32( R_EAX, -4, R_EAX );
19.817 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.818 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.819 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.820 sh4_x86.tstate = TSTATE_NONE;
19.821 :}
19.822 STC.L VBR, @-Rn {:
19.823 @@ -2659,10 +2589,9 @@
19.824 load_reg( R_EAX, Rn );
19.825 check_walign32( R_EAX );
19.826 ADD_imm8s_r32( -4, R_EAX );
19.827 - MMU_TRANSLATE_WRITE( R_EAX );
19.828 load_spreg( R_EDX, R_VBR );
19.829 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.830 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.831 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.832 sh4_x86.tstate = TSTATE_NONE;
19.833 :}
19.834 STC.L SSR, @-Rn {:
19.835 @@ -2671,10 +2600,9 @@
19.836 load_reg( R_EAX, Rn );
19.837 check_walign32( R_EAX );
19.838 ADD_imm8s_r32( -4, R_EAX );
19.839 - MMU_TRANSLATE_WRITE( R_EAX );
19.840 load_spreg( R_EDX, R_SSR );
19.841 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.842 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.843 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.844 sh4_x86.tstate = TSTATE_NONE;
19.845 :}
19.846 STC.L SPC, @-Rn {:
19.847 @@ -2683,10 +2611,9 @@
19.848 load_reg( R_EAX, Rn );
19.849 check_walign32( R_EAX );
19.850 ADD_imm8s_r32( -4, R_EAX );
19.851 - MMU_TRANSLATE_WRITE( R_EAX );
19.852 load_spreg( R_EDX, R_SPC );
19.853 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.854 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.855 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.856 sh4_x86.tstate = TSTATE_NONE;
19.857 :}
19.858 STC.L SGR, @-Rn {:
19.859 @@ -2695,10 +2622,9 @@
19.860 load_reg( R_EAX, Rn );
19.861 check_walign32( R_EAX );
19.862 ADD_imm8s_r32( -4, R_EAX );
19.863 - MMU_TRANSLATE_WRITE( R_EAX );
19.864 load_spreg( R_EDX, R_SGR );
19.865 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.866 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.867 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.868 sh4_x86.tstate = TSTATE_NONE;
19.869 :}
19.870 STC.L DBR, @-Rn {:
19.871 @@ -2707,10 +2633,9 @@
19.872 load_reg( R_EAX, Rn );
19.873 check_walign32( R_EAX );
19.874 ADD_imm8s_r32( -4, R_EAX );
19.875 - MMU_TRANSLATE_WRITE( R_EAX );
19.876 load_spreg( R_EDX, R_DBR );
19.877 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.878 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.879 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.880 sh4_x86.tstate = TSTATE_NONE;
19.881 :}
19.882 STC.L Rm_BANK, @-Rn {:
19.883 @@ -2719,10 +2644,9 @@
19.884 load_reg( R_EAX, Rn );
19.885 check_walign32( R_EAX );
19.886 ADD_imm8s_r32( -4, R_EAX );
19.887 - MMU_TRANSLATE_WRITE( R_EAX );
19.888 load_spreg( R_EDX, REG_OFFSET(r_bank[Rm_BANK]) );
19.889 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.890 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.891 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.892 sh4_x86.tstate = TSTATE_NONE;
19.893 :}
19.894 STC.L GBR, @-Rn {:
19.895 @@ -2730,10 +2654,9 @@
19.896 load_reg( R_EAX, Rn );
19.897 check_walign32( R_EAX );
19.898 ADD_imm8s_r32( -4, R_EAX );
19.899 - MMU_TRANSLATE_WRITE( R_EAX );
19.900 load_spreg( R_EDX, R_GBR );
19.901 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.902 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.903 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.904 sh4_x86.tstate = TSTATE_NONE;
19.905 :}
19.906 STS FPSCR, Rn {:
19.907 @@ -2748,10 +2671,9 @@
19.908 load_reg( R_EAX, Rn );
19.909 check_walign32( R_EAX );
19.910 ADD_imm8s_r32( -4, R_EAX );
19.911 - MMU_TRANSLATE_WRITE( R_EAX );
19.912 load_spreg( R_EDX, R_FPSCR );
19.913 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.914 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.915 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.916 sh4_x86.tstate = TSTATE_NONE;
19.917 :}
19.918 STS FPUL, Rn {:
19.919 @@ -2766,10 +2688,9 @@
19.920 load_reg( R_EAX, Rn );
19.921 check_walign32( R_EAX );
19.922 ADD_imm8s_r32( -4, R_EAX );
19.923 - MMU_TRANSLATE_WRITE( R_EAX );
19.924 load_spreg( R_EDX, R_FPUL );
19.925 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.926 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.927 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.928 sh4_x86.tstate = TSTATE_NONE;
19.929 :}
19.930 STS MACH, Rn {:
19.931 @@ -2782,10 +2703,9 @@
19.932 load_reg( R_EAX, Rn );
19.933 check_walign32( R_EAX );
19.934 ADD_imm8s_r32( -4, R_EAX );
19.935 - MMU_TRANSLATE_WRITE( R_EAX );
19.936 load_spreg( R_EDX, R_MACH );
19.937 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.938 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.939 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.940 sh4_x86.tstate = TSTATE_NONE;
19.941 :}
19.942 STS MACL, Rn {:
19.943 @@ -2798,10 +2718,9 @@
19.944 load_reg( R_EAX, Rn );
19.945 check_walign32( R_EAX );
19.946 ADD_imm8s_r32( -4, R_EAX );
19.947 - MMU_TRANSLATE_WRITE( R_EAX );
19.948 load_spreg( R_EDX, R_MACL );
19.949 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.950 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.951 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.952 sh4_x86.tstate = TSTATE_NONE;
19.953 :}
19.954 STS PR, Rn {:
19.955 @@ -2814,10 +2733,9 @@
19.956 load_reg( R_EAX, Rn );
19.957 check_walign32( R_EAX );
19.958 ADD_imm8s_r32( -4, R_EAX );
19.959 - MMU_TRANSLATE_WRITE( R_EAX );
19.960 load_spreg( R_EDX, R_PR );
19.961 + MEM_WRITE_LONG( R_EAX, R_EDX );
19.962 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
19.963 - MEM_WRITE_LONG( R_EAX, R_EDX );
19.964 sh4_x86.tstate = TSTATE_NONE;
19.965 :}
19.966
20.1 --- a/src/sh4/x86op.h Sat Dec 27 04:09:17 2008 +0000
20.2 +++ b/src/sh4/x86op.h Sat Jan 03 03:30:26 2009 +0000
20.3 @@ -55,12 +55,18 @@
20.4 #define LEA_sh4r_rptr(disp, r1) REXW(); LEA_sh4r_r32(disp,r1)
20.5 #define MOV_moffptr_EAX(offptr) REXW(); MOV_moff32_EAX( offptr )
20.6 #define load_exc_backpatch( x86reg ) REXW(); OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP64( 0 )
20.7 +#define MOV_backpatch_esp8( disp ) REXW(); OP(0xC7); MODRM_r32_esp8(0, disp); sh4_x86_add_backpatch( xlat_output, pc, -2); OP64(0)
20.8 +
20.9 +/* imm64 operations are only defined for x86-64 */
20.10 +#define MOV_imm64_r32(i64,r1) REXW(); OP(0xB8+r1); OP64(i64)
20.11 +
20.12 #else /* 32-bit system */
20.13 #define OPPTR(x) OP32((uint32_t)(x))
20.14 #define AND_imm8s_rptr(imm, r1) AND_imm8s_r32( imm, r1 )
20.15 #define LEA_sh4r_rptr(disp, r1) LEA_sh4r_r32(disp,r1)
20.16 #define MOV_moffptr_EAX(offptr) MOV_moff32_EAX( offptr )
20.17 #define load_exc_backpatch( x86reg ) OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP32( 0 )
20.18 +#define MOV_backpatch_esp8( disp ) OP(0xC7); MODRM_r32_esp8(0, disp); sh4_x86_add_backpatch( xlat_output, pc, -2); OP32(0)
20.19 #endif
20.20 #define STACK_ALIGN 16
20.21 #define POP_r32(r1) OP(0x58 + r1)
20.22 @@ -117,6 +123,9 @@
20.23
20.24 #define MODRM_r32_sh4r(r1,disp) if(disp>127){ MODRM_r32_ebp32(r1,disp);}else{ MODRM_r32_ebp8(r1,(unsigned char)disp); }
20.25
20.26 +/* Absolute displacement (no base) */
20.27 +#define MODRM_r32_disp32(r1,disp) OP(0x05 | (r1<<3)); OP32(disp)
20.28 +
20.29 #define REXW() OP(0x48)
20.30
20.31 /* Major opcodes */
20.32 @@ -133,6 +142,7 @@
20.33 #define AND_r32_r32(r1,r2) OP(0x23); MODRM_rm32_r32(r1,r2)
20.34 #define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)
20.35 #define AND_imm8s_r32(imm8,r1) OP(0x83); MODRM_rm32_r32(r1,4); OP(imm8)
20.36 +#define AND_imm8s_sh4r(imm8,disp) OP(0x83); MODRM_r32_sh4r(4,disp); OP(imm8)
20.37 #define AND_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,4); OP32(imm)
20.38 #define AND_sh4r_r32(disp,r1) OP(0x23); MODRM_r32_sh4r(r1, disp)
20.39 #define CALL_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,2)
20.40 @@ -148,10 +158,13 @@
20.41 #define CMP_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(7,disp) OP(imm)
20.42 #define DEC_r32(r1) OP(0x48+r1)
20.43 #define IMUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,5)
20.44 +#define IMUL_esp8(disp) OP(0xF7); MODRM_r32_esp8(5,disp)
20.45 #define INC_r32(r1) OP(0x40+r1)
20.46 #define JMP_rel8(label) OP(0xEB); MARK_JMP8(label); OP(-1);
20.47 +#define JMP_r32disp8(r1,disp) OP(0xFF); OP(0x60 + r1); OP(disp)
20.48 #define LEA_sh4r_r32(disp,r1) OP(0x8D); MODRM_r32_sh4r(r1,disp)
20.49 #define LEA_r32disp8_r32(r1, disp, r2) OP(0x8D); OP( 0x40 + (r2<<3) + r1); OP(disp)
20.50 +#define MOV_imm32_r32(i32,r1) OP(0xB8+r1); OP32(i32)
20.51 #define MOV_r32_r32(r1,r2) OP(0x89); MODRM_r32_rm32(r1,r2)
20.52 #define MOV_r32_sh4r(r1,disp) OP(0x89); MODRM_r32_sh4r(r1,disp)
20.53 #define MOV_moff32_EAX(off) OP(0xA1); OPPTR(off)
21.1 --- a/src/test/testsh4x86.c Sat Dec 27 04:09:17 2008 +0000
21.2 +++ b/src/test/testsh4x86.c Sat Jan 03 03:30:26 2009 +0000
21.3 @@ -55,8 +55,6 @@
21.4 struct x86_symbol local_symbols[] = {
21.5 { "sh4r+128", ((char *)&sh4r)+128 },
21.6 { "sh4_cpu_period", &sh4_cpu_period },
21.7 - { "mmu_vma_to_phys_read", mmu_vma_to_phys_read },
21.8 - { "mmu_vma_to_phys_write", mmu_vma_to_phys_write },
21.9 { "sh4_address_space", 0x12345432 },
21.10 { "sh4_write_fpscr", sh4_write_fpscr },
21.11 { "sh4_write_sr", sh4_write_sr },
21.12 @@ -124,8 +122,12 @@
21.13 gboolean FASTCALL sh4_raise_exception( int exc ) { return TRUE; }
21.14 gboolean FASTCALL sh4_raise_tlb_exception( int exc ) { return TRUE; }
21.15 gboolean FASTCALL sh4_raise_trap( int exc ) { return TRUE; }
21.16 +void FASTCALL sh4_flush_store_queue( sh4addr_t addr ) { }
21.17 +void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc ) { }
21.18 uint32_t sh4_sleep_run_slice(uint32_t nanosecs) { return nanosecs; }
21.19 gboolean gui_error_dialog( const char *fmt, ... ) { return TRUE; }
21.20 +gboolean FASTCALL mmu_update_icache( sh4vma_t addr ) { return TRUE; }
21.21 +void MMU_ldtlb() { }
21.22 struct sh4_icache_struct sh4_icache;
21.23 struct mem_region_fn mem_region_unmapped;
21.24
22.1 --- a/src/x86dasm/x86dasm.c Sat Dec 27 04:09:17 2008 +0000
22.2 +++ b/src/x86dasm/x86dasm.c Sat Jan 03 03:30:26 2009 +0000
22.3 @@ -25,11 +25,10 @@
22.4 #include "sh4/sh4.h"
22.5 #include "sh4/sh4trans.h"
22.6
22.7 -extern const struct reg_desc_struct sh4_reg_map[];
22.8 const struct cpu_desc_struct x86_cpu_desc =
22.9 { "x86", (disasm_func_t)x86_disasm_instruction, NULL, mem_has_page,
22.10 NULL, NULL, NULL, 1,
22.11 - (char *)&sh4r, sizeof(sh4r), sh4_reg_map,
22.12 + NULL, 0, NULL,
22.13 &sh4r.pc };
22.14
22.15 static int x86_disasm_output( void *data, const char *format, ... );
.