changeset  571:9bc09948d0f2 lxdream-mmu
parent     570:d2893980fbf5
child      572:45be68680be1
author     nkeynes
date       Thu Jan 10 08:28:37 2008 +0000
branch     lxdream-mmu
More MMU work in progress. Much better now...
Makefile.in
aclocal.m4
configure
configure.in
src/Makefile.am
src/Makefile.in
src/dreamcast.h
src/sh4/ia32abi.h
src/sh4/ia32asm.s
src/sh4/ia32mac.h
src/sh4/ia64abi.h
src/sh4/ia64asm.s
src/sh4/mmu.c
src/sh4/sh4.c
src/sh4/sh4.h
src/sh4/sh4core.h
src/sh4/sh4mem.c
src/sh4/sh4trans.c
src/sh4/sh4trans.h
src/sh4/sh4x86.c
src/sh4/sh4x86.in
src/sh4/xltcache.c
src/sh4/xltcache.h
src/test/testsh4x86.c
src/x86dasm/x86dasm.c
1.1 --- a/Makefile.in Sun Jan 06 12:24:18 2008 +0000
1.2 +++ b/Makefile.in Thu Jan 10 08:28:37 2008 +0000
1.3 @@ -55,9 +55,13 @@
1.4 BUILD_SH4X86_TRUE = @BUILD_SH4X86_TRUE@
1.5 BUILD_SYSTEST_FALSE = @BUILD_SYSTEST_FALSE@
1.6 BUILD_SYSTEST_TRUE = @BUILD_SYSTEST_TRUE@
1.7 +BUILD_X86_64_FALSE = @BUILD_X86_64_FALSE@
1.8 +BUILD_X86_64_TRUE = @BUILD_X86_64_TRUE@
1.9 CATALOGS = @CATALOGS@
1.10 CATOBJEXT = @CATOBJEXT@
1.11 CC = @CC@
1.12 +CCAS = @CCAS@
1.13 +CCASFLAGS = @CCASFLAGS@
1.14 CCDEPMODE = @CCDEPMODE@
1.15 CDROM_LINUX_FALSE = @CDROM_LINUX_FALSE@
1.16 CDROM_LINUX_TRUE = @CDROM_LINUX_TRUE@
1.17 @@ -103,6 +107,7 @@
1.18 MAKEINFO = @MAKEINFO@
1.19 MKINSTALLDIRS = @MKINSTALLDIRS@
1.20 MSGFMT = @MSGFMT@
1.21 +MSGFMT_OPTS = @MSGFMT_OPTS@
1.22 OBJEXT = @OBJEXT@
1.23 PACKAGE = @PACKAGE@
1.24 PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
2.1 --- a/aclocal.m4 Sun Jan 06 12:24:18 2008 +0000
2.2 +++ b/aclocal.m4 Thu Jan 10 08:28:37 2008 +0000
2.3 @@ -1006,6 +1006,38 @@
2.4
2.5 AU_DEFUN([fp_PROG_CC_STDC], [AM_PROG_CC_STDC])
2.6
2.7 +# Figure out how to run the assembler. -*- Autoconf -*-
2.8 +
2.9 +# serial 2
2.10 +
2.11 +# Copyright 2001 Free Software Foundation, Inc.
2.12 +
2.13 +# This program is free software; you can redistribute it and/or modify
2.14 +# it under the terms of the GNU General Public License as published by
2.15 +# the Free Software Foundation; either version 2, or (at your option)
2.16 +# any later version.
2.17 +
2.18 +# This program is distributed in the hope that it will be useful,
2.19 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
2.20 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2.21 +# GNU General Public License for more details.
2.22 +
2.23 +# You should have received a copy of the GNU General Public License
2.24 +# along with this program; if not, write to the Free Software
2.25 +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
2.26 +# 02111-1307, USA.
2.27 +
2.28 +# AM_PROG_AS
2.29 +# ----------
2.30 +AC_DEFUN([AM_PROG_AS],
2.31 +[# By default we simply use the C compiler to build assembly code.
2.32 +AC_REQUIRE([AC_PROG_CC])
2.33 +: ${CCAS='$(CC)'}
2.34 +# Set ASFLAGS if not already set.
2.35 +: ${CCASFLAGS='$(CFLAGS)'}
2.36 +AC_SUBST(CCAS)
2.37 +AC_SUBST(CCASFLAGS)])
2.38 +
2.39 # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
2.40 #
2.41 # Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
2.42 @@ -1144,7 +1176,8 @@
2.43
2.44 _PKG_TEXT
2.45 ])],
2.46 - [$4])
2.47 + [AC_MSG_RESULT([no])
2.48 + $4])
2.49 elif test $pkg_failed = untried; then
2.50 ifelse([$4], , [AC_MSG_FAILURE(dnl
2.51 [The pkg-config script could not be found or is too old. Make sure it
2.52 @@ -1266,7 +1299,8 @@
2.53 #-----------------
2.54 glib_DEFUN([GLIB_WITH_NLS],
2.55 dnl NLS is obligatory
2.56 - [USE_NLS=yes
2.57 + [AC_REQUIRE([AC_CANONICAL_HOST])dnl
2.58 + USE_NLS=yes
2.59 AC_SUBST(USE_NLS)
2.60
2.61 gt_cv_have_gettext=no
2.62 @@ -1370,6 +1404,20 @@
2.63 glib_save_LIBS="$LIBS"
2.64 LIBS="$LIBS $INTLLIBS"
2.65 AC_CHECK_FUNCS(dcgettext)
2.66 + MSGFMT_OPTS=
2.67 + AC_MSG_CHECKING([if msgfmt accepts -c])
2.68 + GLIB_RUN_PROG([$MSGFMT -c -o /dev/null],[
2.69 +msgid ""
2.70 +msgstr ""
2.71 +"Content-Type: text/plain; charset=UTF-8\n"
2.72 +"Project-Id-Version: test 1.0\n"
2.73 +"PO-Revision-Date: 2007-02-15 12:01+0100\n"
2.74 +"Last-Translator: test <foo@bar.xx>\n"
2.75 +"Language-Team: C <LL@li.org>\n"
2.76 +"MIME-Version: 1.0\n"
2.77 +"Content-Transfer-Encoding: 8bit\n"
2.78 +], [MSGFMT_OPTS=-c; AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no])])
2.79 + AC_SUBST(MSGFMT_OPTS)
2.80 AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT)
2.81 GLIB_PATH_PROG_WITH_TEST(XGETTEXT, xgettext,
2.82 [test -z "`$ac_dir/$ac_word -h 2>&1 | grep '(HELP)'`"], :)
2.83 @@ -1541,8 +1589,10 @@
2.84 [glib_REQUIRE([GLIB_GNU_GETTEXT])dnl
2.85 glib_save_prefix="$prefix"
2.86 glib_save_exec_prefix="$exec_prefix"
2.87 +glib_save_datarootdir="$datarootdir"
2.88 test "x$prefix" = xNONE && prefix=$ac_default_prefix
2.89 test "x$exec_prefix" = xNONE && exec_prefix=$prefix
2.90 +datarootdir=`eval echo "${datarootdir}"`
2.91 if test "x$CATOBJEXT" = "x.mo" ; then
2.92 localedir=`eval echo "${libdir}/locale"`
2.93 else
2.94 @@ -1550,6 +1600,7 @@
2.95 fi
2.96 prefix="$glib_save_prefix"
2.97 exec_prefix="$glib_save_exec_prefix"
2.98 +datarootdir="$glib_save_datarootdir"
2.99 AC_DEFINE_UNQUOTED($1, "$localedir",
2.100 [Define the location where the catalogs will be installed])
2.101 ])
2.102 @@ -1562,3 +1613,20 @@
2.103 AC_DEFUN([AM_GLIB_DEFINE_LOCALEDIR],[GLIB_DEFINE_LOCALEDIR($@)])
2.104 ])dnl
2.105
2.106 +# GLIB_RUN_PROG(PROGRAM, TEST-FILE, [ACTION-IF-PASS], [ACTION-IF-FAIL])
2.107 +#
2.108 +# Create a temporary file with TEST-FILE as its contents and pass the
2.109 +# file name to PROGRAM. Perform ACTION-IF-PASS if PROGRAM exits with
2.110 +# 0 and perform ACTION-IF-FAIL for any other exit status.
2.111 +AC_DEFUN([GLIB_RUN_PROG],
2.112 +[cat >conftest.foo <<_ACEOF
2.113 +$2
2.114 +_ACEOF
2.115 +if AC_RUN_LOG([$1 conftest.foo]); then
2.116 + m4_ifval([$3], [$3], [:])
2.117 +m4_ifvaln([$4], [else $4])dnl
2.118 +echo "$as_me: failed input was:" >&AS_MESSAGE_LOG_FD
2.119 +sed 's/^/| /' conftest.foo >&AS_MESSAGE_LOG_FD
2.120 +fi])
2.121 +
2.122 +
3.1 --- a/configure Sun Jan 06 12:24:18 2008 +0000
3.2 +++ b/configure Thu Jan 10 08:28:37 2008 +0000
3.3 @@ -687,6 +687,8 @@
3.4 CCDEPMODE
3.5 am__fastdepCC_TRUE
3.6 am__fastdepCC_FALSE
3.7 +CCAS
3.8 +CCASFLAGS
3.9 CPP
3.10 GREP
3.11 EGREP
3.12 @@ -709,6 +711,8 @@
3.13 GUI_GTK_FALSE
3.14 BUILD_SH4X86_TRUE
3.15 BUILD_SH4X86_FALSE
3.16 +BUILD_X86_64_TRUE
3.17 +BUILD_X86_64_FALSE
3.18 ESOUND_CFLAGS
3.19 ESOUND_LIBS
3.20 AUDIO_ESOUND_TRUE
3.21 @@ -728,6 +732,7 @@
3.22 GETTEXT_PACKAGE
3.23 USE_NLS
3.24 MSGFMT
3.25 +MSGFMT_OPTS
3.26 GMSGFMT
3.27 XGETTEXT
3.28 CATALOGS
3.29 @@ -4297,6 +4302,13 @@
3.30 *) CC="$CC $am_cv_prog_cc_stdc" ;;
3.31 esac
3.32
3.33 +# By default we simply use the C compiler to build assembly code.
3.34 +
3.35 +: ${CCAS='$(CC)'}
3.36 +# Set ASFLAGS if not already set.
3.37 +: ${CCASFLAGS='$(CFLAGS)'}
3.38 +
3.39 +
3.40 ac_ext=c
3.41 ac_cpp='$CPP $CPPFLAGS'
3.42 ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
3.43 @@ -5873,6 +5885,16 @@
3.44
3.45
3.46
3.47 +if test "$SH4_TRANSLATOR" = "x86_64"; then
3.48 + BUILD_X86_64_TRUE=
3.49 + BUILD_X86_64_FALSE='#'
3.50 +else
3.51 + BUILD_X86_64_TRUE='#'
3.52 + BUILD_X86_64_FALSE=
3.53 +fi
3.54 +
3.55 +
3.56 +
3.57 pkg_failed=no
3.58 { echo "$as_me:$LINENO: checking for ESOUND" >&5
3.59 echo $ECHO_N "checking for ESOUND... $ECHO_C" >&6; }
3.60 @@ -5931,7 +5953,9 @@
3.61 # Put the nasty error message in config.log where it belongs
3.62 echo "$ESOUND_PKG_ERRORS" >&5
3.63
3.64 - echo "Warning: esound not found - building without audio support"
3.65 + { echo "$as_me:$LINENO: result: no" >&5
3.66 +echo "${ECHO_T}no" >&6; }
3.67 + echo "Warning: esound not found - building without audio support"
3.68 elif test $pkg_failed = untried; then
3.69 echo "Warning: esound not found - building without audio support"
3.70 else
3.71 @@ -6587,7 +6611,7 @@
3.72
3.73 fi
3.74 fi
3.75 - USE_NLS=yes
3.76 + USE_NLS=yes
3.77
3.78
3.79 gt_cv_have_gettext=no
3.80 @@ -7542,6 +7566,35 @@
3.81 fi
3.82 done
3.83
3.84 + MSGFMT_OPTS=
3.85 + { echo "$as_me:$LINENO: checking if msgfmt accepts -c" >&5
3.86 +echo $ECHO_N "checking if msgfmt accepts -c... $ECHO_C" >&6; }
3.87 + cat >conftest.foo <<_ACEOF
3.88 +
3.89 +msgid ""
3.90 +msgstr ""
3.91 +"Content-Type: text/plain; charset=UTF-8\n"
3.92 +"Project-Id-Version: test 1.0\n"
3.93 +"PO-Revision-Date: 2007-02-15 12:01+0100\n"
3.94 +"Last-Translator: test <foo@bar.xx>\n"
3.95 +"Language-Team: C <LL@li.org>\n"
3.96 +"MIME-Version: 1.0\n"
3.97 +"Content-Transfer-Encoding: 8bit\n"
3.98 +
3.99 +_ACEOF
3.100 +if { (echo "$as_me:$LINENO: \$MSGFMT -c -o /dev/null conftest.foo") >&5
3.101 + ($MSGFMT -c -o /dev/null conftest.foo) 2>&5
3.102 + ac_status=$?
3.103 + echo "$as_me:$LINENO: \$? = $ac_status" >&5
3.104 + (exit $ac_status); }; then
3.105 + MSGFMT_OPTS=-c; { echo "$as_me:$LINENO: result: yes" >&5
3.106 +echo "${ECHO_T}yes" >&6; }
3.107 +else { echo "$as_me:$LINENO: result: no" >&5
3.108 +echo "${ECHO_T}no" >&6; }
3.109 +echo "$as_me: failed input was:" >&5
3.110 +sed 's/^/| /' conftest.foo >&5
3.111 +fi
3.112 +
3.113 # Extract the first word of "gmsgfmt", so it can be a program name with args.
3.114 set dummy gmsgfmt; ac_word=$2
3.115 { echo "$as_me:$LINENO: checking for $ac_word" >&5
3.116 @@ -8021,6 +8074,13 @@
3.117 Usually this means the macro was only invoked conditionally." >&2;}
3.118 { (exit 1); exit 1; }; }
3.119 fi
3.120 +if test -z "${BUILD_X86_64_TRUE}" && test -z "${BUILD_X86_64_FALSE}"; then
3.121 + { { echo "$as_me:$LINENO: error: conditional \"BUILD_X86_64\" was never defined.
3.122 +Usually this means the macro was only invoked conditionally." >&5
3.123 +echo "$as_me: error: conditional \"BUILD_X86_64\" was never defined.
3.124 +Usually this means the macro was only invoked conditionally." >&2;}
3.125 + { (exit 1); exit 1; }; }
3.126 +fi
3.127 if test -z "${AUDIO_ESOUND_TRUE}" && test -z "${AUDIO_ESOUND_FALSE}"; then
3.128 { { echo "$as_me:$LINENO: error: conditional \"AUDIO_ESOUND\" was never defined.
3.129 Usually this means the macro was only invoked conditionally." >&5
3.130 @@ -8659,6 +8719,8 @@
3.131 CCDEPMODE!$CCDEPMODE$ac_delim
3.132 am__fastdepCC_TRUE!$am__fastdepCC_TRUE$ac_delim
3.133 am__fastdepCC_FALSE!$am__fastdepCC_FALSE$ac_delim
3.134 +CCAS!$CCAS$ac_delim
3.135 +CCASFLAGS!$CCASFLAGS$ac_delim
3.136 CPP!$CPP$ac_delim
3.137 GREP!$GREP$ac_delim
3.138 EGREP!$EGREP$ac_delim
3.139 @@ -8680,8 +8742,6 @@
3.140 GUI_GTK_TRUE!$GUI_GTK_TRUE$ac_delim
3.141 GUI_GTK_FALSE!$GUI_GTK_FALSE$ac_delim
3.142 BUILD_SH4X86_TRUE!$BUILD_SH4X86_TRUE$ac_delim
3.143 -BUILD_SH4X86_FALSE!$BUILD_SH4X86_FALSE$ac_delim
3.144 -ESOUND_CFLAGS!$ESOUND_CFLAGS$ac_delim
3.145 _ACEOF
3.146
3.147 if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then
3.148 @@ -8723,6 +8783,10 @@
3.149 ac_delim='%!_!# '
3.150 for ac_last_try in false false false false false :; do
3.151 cat >conf$$subs.sed <<_ACEOF
3.152 +BUILD_SH4X86_FALSE!$BUILD_SH4X86_FALSE$ac_delim
3.153 +BUILD_X86_64_TRUE!$BUILD_X86_64_TRUE$ac_delim
3.154 +BUILD_X86_64_FALSE!$BUILD_X86_64_FALSE$ac_delim
3.155 +ESOUND_CFLAGS!$ESOUND_CFLAGS$ac_delim
3.156 ESOUND_LIBS!$ESOUND_LIBS$ac_delim
3.157 AUDIO_ESOUND_TRUE!$AUDIO_ESOUND_TRUE$ac_delim
3.158 AUDIO_ESOUND_FALSE!$AUDIO_ESOUND_FALSE$ac_delim
3.159 @@ -8741,6 +8805,7 @@
3.160 GETTEXT_PACKAGE!$GETTEXT_PACKAGE$ac_delim
3.161 USE_NLS!$USE_NLS$ac_delim
3.162 MSGFMT!$MSGFMT$ac_delim
3.163 +MSGFMT_OPTS!$MSGFMT_OPTS$ac_delim
3.164 GMSGFMT!$GMSGFMT$ac_delim
3.165 XGETTEXT!$XGETTEXT$ac_delim
3.166 CATALOGS!$CATALOGS$ac_delim
3.167 @@ -8758,7 +8823,7 @@
3.168 LTLIBOBJS!$LTLIBOBJS$ac_delim
3.169 _ACEOF
3.170
3.171 - if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 33; then
3.172 + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 38; then
3.173 break
3.174 elif $ac_last_try; then
3.175 { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
4.1 --- a/configure.in Sun Jan 06 12:24:18 2008 +0000
4.2 +++ b/configure.in Thu Jan 10 08:28:37 2008 +0000
4.3 @@ -8,6 +8,7 @@
4.4 AC_ISC_POSIX
4.5 AC_PROG_CC
4.6 AM_PROG_CC_STDC
4.7 +AM_PROG_AS
4.8 AC_HEADER_STDC
4.9 AC_CANONICAL_BUILD
4.10 AC_CANONICAL_HOST
4.11 @@ -69,6 +70,7 @@
4.12 echo "Warning: No translator available for $host. Building emulation core only";;
4.13 esac
4.14 AM_CONDITIONAL(BUILD_SH4X86, [test "$SH4_TRANSLATOR" = "x86" -o "$SH4_TRANSLATOR" = "x86_64"])
4.15 +AM_CONDITIONAL(BUILD_X86_64, [test "$SH4_TRANSLATOR" = "x86_64"])
4.16
4.17 dnl ------------------ Optional driver support -------------------
4.18 dnl Check for esound
5.1 --- a/src/Makefile.am Sun Jan 06 12:24:18 2008 +0000
5.2 +++ b/src/Makefile.am Thu Jan 10 08:28:37 2008 +0000
5.3 @@ -48,16 +48,27 @@
5.4 sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
5.5 sh4/sh4trans.c sh4/sh4trans.h \
5.6 x86dasm/x86dasm.c x86dasm/x86dasm.h \
5.7 - x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c
5.8 + x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c
5.9
5.10 +test_testsh4x86_LDADD = @GTK_LIBS@
5.11 +
5.12 +check_PROGRAMS += test/testsh4x86
5.13 +
5.14 +if BUILD_X86_64
5.15 +lxdream_SOURCES += sh4/ia64asm.s
5.16 test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
5.17 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
5.18 x86dasm/dis-buf.c \
5.19 sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
5.20 - sh4/xltcache.h mem.c util.c
5.21 -test_testsh4x86_LDADD = @GTK_LIBS@
5.22 -
5.23 -check_PROGRAMS += test/testsh4x86
5.24 + sh4/xltcache.h mem.c util.c sh4/ia64asm.s
5.25 +else
5.26 +lxdream_SOURCES += sh4/ia32asm.s
5.27 +test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
5.28 + x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
5.29 + x86dasm/dis-buf.c \
5.30 + sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
5.31 + sh4/xltcache.h mem.c util.c sh4/ia32asm.s
5.32 +endif
5.33
5.34 endif
5.35
6.1 --- a/src/Makefile.in Sun Jan 06 12:24:18 2008 +0000
6.2 +++ b/src/Makefile.in Thu Jan 10 08:28:37 2008 +0000
6.3 @@ -40,19 +40,21 @@
6.4 @BUILD_SH4X86_TRUE@ sh4/ia32abi.h sh4/ia32mac.h sh4/ia64abi.h \
6.5 @BUILD_SH4X86_TRUE@ sh4/sh4trans.c sh4/sh4trans.h \
6.6 @BUILD_SH4X86_TRUE@ x86dasm/x86dasm.c x86dasm/x86dasm.h \
6.7 -@BUILD_SH4X86_TRUE@ x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c
6.8 +@BUILD_SH4X86_TRUE@ x86dasm/i386-dis.c x86dasm/dis-init.c x86dasm/dis-buf.c
6.9
6.10 @BUILD_SH4X86_TRUE@am__append_2 = test/testsh4x86
6.11 -@GUI_GTK_TRUE@am__append_3 = gtkui/gtkui.c gtkui/gtkui.h \
6.12 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@am__append_3 = sh4/ia64asm.s
6.13 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@am__append_4 = sh4/ia32asm.s
6.14 +@GUI_GTK_TRUE@am__append_5 = gtkui/gtkui.c gtkui/gtkui.h \
6.15 @GUI_GTK_TRUE@ gtkui/main_win.c gtkui/gtkcb.c \
6.16 @GUI_GTK_TRUE@ gtkui/mmio_win.c gtkui/debug_win.c gtkui/dump_win.c \
6.17 @GUI_GTK_TRUE@ gtkui/ctrl_dlg.c gtkui/path_dlg.c gtkui/gdrom_menu.c \
6.18 @GUI_GTK_TRUE@ drivers/video_gtk.c drivers/video_gtk.h \
6.19 @GUI_GTK_TRUE@ drivers/video_glx.c drivers/video_glx.h
6.20
6.21 -@CDROM_LINUX_TRUE@am__append_4 = drivers/cd_linux.c
6.22 -@CDROM_LINUX_FALSE@am__append_5 = drivers/cd_none.c
6.23 -@AUDIO_ESOUND_TRUE@am__append_6 = drivers/audio_esd.c
6.24 +@CDROM_LINUX_TRUE@am__append_6 = drivers/cd_linux.c
6.25 +@CDROM_LINUX_FALSE@am__append_7 = drivers/cd_none.c
6.26 +@AUDIO_ESOUND_TRUE@am__append_8 = drivers/audio_esd.c
6.27 ACLOCAL = @ACLOCAL@
6.28 AMDEP_FALSE = @AMDEP_FALSE@
6.29 AMDEP_TRUE = @AMDEP_TRUE@
6.30 @@ -72,9 +74,13 @@
6.31 BUILD_SH4X86_TRUE = @BUILD_SH4X86_TRUE@
6.32 BUILD_SYSTEST_FALSE = @BUILD_SYSTEST_FALSE@
6.33 BUILD_SYSTEST_TRUE = @BUILD_SYSTEST_TRUE@
6.34 +BUILD_X86_64_FALSE = @BUILD_X86_64_FALSE@
6.35 +BUILD_X86_64_TRUE = @BUILD_X86_64_TRUE@
6.36 CATALOGS = @CATALOGS@
6.37 CATOBJEXT = @CATOBJEXT@
6.38 CC = @CC@
6.39 +CCAS = @CCAS@
6.40 +CCASFLAGS = @CCASFLAGS@
6.41 CCDEPMODE = @CCDEPMODE@
6.42 CDROM_LINUX_FALSE = @CDROM_LINUX_FALSE@
6.43 CDROM_LINUX_TRUE = @CDROM_LINUX_TRUE@
6.44 @@ -120,6 +126,7 @@
6.45 MAKEINFO = @MAKEINFO@
6.46 MKINSTALLDIRS = @MKINSTALLDIRS@
6.47 MSGFMT = @MSGFMT@
6.48 +MSGFMT_OPTS = @MSGFMT_OPTS@
6.49 OBJEXT = @OBJEXT@
6.50 PACKAGE = @PACKAGE@
6.51 PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
6.52 @@ -226,15 +233,21 @@
6.53 drivers/audio_null.c drivers/video_null.c \
6.54 drivers/gl_common.c drivers/gl_common.h drivers/gl_fbo.c \
6.55 drivers/gl_sl.c drivers/gl_slsrc.c\
6.56 -$(am__append_1) $(am__append_3) $(am__append_4) $(am__append_5) $(am__append_6)
6.57 -
6.58 -@BUILD_SH4X86_TRUE@test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
6.59 -@BUILD_SH4X86_TRUE@ x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
6.60 -@BUILD_SH4X86_TRUE@ x86dasm/dis-buf.c \
6.61 -@BUILD_SH4X86_TRUE@ sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
6.62 -@BUILD_SH4X86_TRUE@ sh4/xltcache.h mem.c util.c
6.63 +$(am__append_1) $(am__append_3) $(am__append_4) $(am__append_5) $(am__append_6) $(am__append_7) $(am__append_8)
6.64
6.65 @BUILD_SH4X86_TRUE@test_testsh4x86_LDADD = @GTK_LIBS@
6.66 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
6.67 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
6.68 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ x86dasm/dis-buf.c \
6.69 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
6.70 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ sh4/xltcache.h mem.c util.c sh4/ia32asm.s
6.71 +
6.72 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@test_testsh4x86_SOURCES = test/testsh4x86.c x86dasm/x86dasm.c \
6.73 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
6.74 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ x86dasm/dis-buf.c \
6.75 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c sh4/xltcache.c \
6.76 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ sh4/xltcache.h mem.c util.c sh4/ia64asm.s
6.77 +
6.78
6.79 lxdream_LDADD = @GTK_LIBS@ @LIBPNG_LIBS@ @ESOUND_LIBS@ $(INTLLIBS)
6.80
6.81 @@ -284,24 +297,26 @@
6.82 sh4/sh4x86.c sh4/x86op.h sh4/ia32abi.h sh4/ia32mac.h \
6.83 sh4/ia64abi.h sh4/sh4trans.c sh4/sh4trans.h x86dasm/x86dasm.c \
6.84 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
6.85 - x86dasm/dis-buf.c gtkui/gtkui.c gtkui/gtkui.h gtkui/main_win.c \
6.86 - gtkui/gtkcb.c gtkui/mmio_win.c gtkui/debug_win.c \
6.87 - gtkui/dump_win.c gtkui/ctrl_dlg.c gtkui/path_dlg.c \
6.88 - gtkui/gdrom_menu.c drivers/video_gtk.c drivers/video_gtk.h \
6.89 - drivers/video_glx.c drivers/video_glx.h drivers/cd_linux.c \
6.90 - drivers/cd_none.c drivers/audio_esd.c
6.91 + x86dasm/dis-buf.c sh4/ia64asm.s sh4/ia32asm.s gtkui/gtkui.c \
6.92 + gtkui/gtkui.h gtkui/main_win.c gtkui/gtkcb.c gtkui/mmio_win.c \
6.93 + gtkui/debug_win.c gtkui/dump_win.c gtkui/ctrl_dlg.c \
6.94 + gtkui/path_dlg.c gtkui/gdrom_menu.c drivers/video_gtk.c \
6.95 + drivers/video_gtk.h drivers/video_glx.c drivers/video_glx.h \
6.96 + drivers/cd_linux.c drivers/cd_none.c drivers/audio_esd.c
6.97 @BUILD_SH4X86_TRUE@am__objects_1 = sh4x86.$(OBJEXT) sh4trans.$(OBJEXT) \
6.98 @BUILD_SH4X86_TRUE@ x86dasm.$(OBJEXT) i386-dis.$(OBJEXT) \
6.99 @BUILD_SH4X86_TRUE@ dis-init.$(OBJEXT) dis-buf.$(OBJEXT)
6.100 -@GUI_GTK_TRUE@am__objects_2 = gtkui.$(OBJEXT) main_win.$(OBJEXT) \
6.101 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@am__objects_2 = ia64asm.$(OBJEXT)
6.102 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@am__objects_3 = ia32asm.$(OBJEXT)
6.103 +@GUI_GTK_TRUE@am__objects_4 = gtkui.$(OBJEXT) main_win.$(OBJEXT) \
6.104 @GUI_GTK_TRUE@ gtkcb.$(OBJEXT) mmio_win.$(OBJEXT) \
6.105 @GUI_GTK_TRUE@ debug_win.$(OBJEXT) dump_win.$(OBJEXT) \
6.106 @GUI_GTK_TRUE@ ctrl_dlg.$(OBJEXT) path_dlg.$(OBJEXT) \
6.107 @GUI_GTK_TRUE@ gdrom_menu.$(OBJEXT) video_gtk.$(OBJEXT) \
6.108 @GUI_GTK_TRUE@ video_glx.$(OBJEXT)
6.109 -@CDROM_LINUX_TRUE@am__objects_3 = cd_linux.$(OBJEXT)
6.110 -@CDROM_LINUX_FALSE@am__objects_4 = cd_none.$(OBJEXT)
6.111 -@AUDIO_ESOUND_TRUE@am__objects_5 = audio_esd.$(OBJEXT)
6.112 +@CDROM_LINUX_TRUE@am__objects_5 = cd_linux.$(OBJEXT)
6.113 +@CDROM_LINUX_FALSE@am__objects_6 = cd_none.$(OBJEXT)
6.114 +@AUDIO_ESOUND_TRUE@am__objects_7 = audio_esd.$(OBJEXT)
6.115 am_lxdream_OBJECTS = main.$(OBJEXT) config.$(OBJEXT) mem.$(OBJEXT) \
6.116 watch.$(OBJEXT) asic.$(OBJEXT) syscall.$(OBJEXT) bios.$(OBJEXT) \
6.117 dcload.$(OBJEXT) ide.$(OBJEXT) gdimage.$(OBJEXT) \
6.118 @@ -320,20 +335,42 @@
6.119 audio_null.$(OBJEXT) video_null.$(OBJEXT) gl_common.$(OBJEXT) \
6.120 gl_fbo.$(OBJEXT) gl_sl.$(OBJEXT) gl_slsrc.$(OBJEXT) \
6.121 $(am__objects_1) $(am__objects_2) $(am__objects_3) \
6.122 - $(am__objects_4) $(am__objects_5)
6.123 + $(am__objects_4) $(am__objects_5) $(am__objects_6) \
6.124 + $(am__objects_7)
6.125 lxdream_OBJECTS = $(am_lxdream_OBJECTS)
6.126 lxdream_DEPENDENCIES =
6.127 lxdream_LDFLAGS =
6.128 am__test_testsh4x86_SOURCES_DIST = test/testsh4x86.c x86dasm/x86dasm.c \
6.129 x86dasm/x86dasm.h x86dasm/i386-dis.c x86dasm/dis-init.c \
6.130 x86dasm/dis-buf.c sh4/sh4dasm.c sh4/sh4trans.c sh4/sh4x86.c \
6.131 - sh4/xltcache.c sh4/xltcache.h mem.c util.c
6.132 -@BUILD_SH4X86_TRUE@am_test_testsh4x86_OBJECTS = testsh4x86.$(OBJEXT) \
6.133 -@BUILD_SH4X86_TRUE@ x86dasm.$(OBJEXT) i386-dis.$(OBJEXT) \
6.134 -@BUILD_SH4X86_TRUE@ dis-init.$(OBJEXT) dis-buf.$(OBJEXT) \
6.135 -@BUILD_SH4X86_TRUE@ sh4dasm.$(OBJEXT) sh4trans.$(OBJEXT) \
6.136 -@BUILD_SH4X86_TRUE@ sh4x86.$(OBJEXT) xltcache.$(OBJEXT) \
6.137 -@BUILD_SH4X86_TRUE@ mem.$(OBJEXT) util.$(OBJEXT)
6.138 + sh4/xltcache.c sh4/xltcache.h mem.c util.c sh4/ia32asm.s \
6.139 + sh4/ia64asm.s
6.140 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@am_test_testsh4x86_OBJECTS = \
6.141 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ testsh4x86.$(OBJEXT) \
6.142 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ x86dasm.$(OBJEXT) \
6.143 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ i386-dis.$(OBJEXT) \
6.144 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ dis-init.$(OBJEXT) \
6.145 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ dis-buf.$(OBJEXT) \
6.146 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ sh4dasm.$(OBJEXT) \
6.147 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ sh4trans.$(OBJEXT) \
6.148 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ sh4x86.$(OBJEXT) \
6.149 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ xltcache.$(OBJEXT) \
6.150 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ mem.$(OBJEXT) \
6.151 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ util.$(OBJEXT) \
6.152 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_TRUE@ ia64asm.$(OBJEXT)
6.153 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@am_test_testsh4x86_OBJECTS = \
6.154 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ testsh4x86.$(OBJEXT) \
6.155 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ x86dasm.$(OBJEXT) \
6.156 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ i386-dis.$(OBJEXT) \
6.157 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ dis-init.$(OBJEXT) \
6.158 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ dis-buf.$(OBJEXT) \
6.159 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ sh4dasm.$(OBJEXT) \
6.160 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ sh4trans.$(OBJEXT) \
6.161 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ sh4x86.$(OBJEXT) \
6.162 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ xltcache.$(OBJEXT) \
6.163 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ mem.$(OBJEXT) \
6.164 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ util.$(OBJEXT) \
6.165 +@BUILD_SH4X86_TRUE@@BUILD_X86_64_FALSE@ ia32asm.$(OBJEXT)
6.166 test_testsh4x86_OBJECTS = $(am_test_testsh4x86_OBJECTS)
6.167 @BUILD_SH4X86_TRUE@test_testsh4x86_DEPENDENCIES =
6.168 @BUILD_SH4X86_FALSE@test_testsh4x86_DEPENDENCIES =
6.169 @@ -393,6 +430,7 @@
6.170 $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
6.171 CCLD = $(CC)
6.172 LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
6.173 +CCASCOMPILE = $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS)
6.174 DIST_SOURCES = $(gendec_SOURCES) $(genglsl_SOURCES) \
6.175 $(am__lxdream_SOURCES_DIST) $(am__test_testsh4x86_SOURCES_DIST) \
6.176 $(test_testxlt_SOURCES)
6.177 @@ -403,7 +441,7 @@
6.178 $(MAKE) $(AM_MAKEFLAGS) all-am
6.179
6.180 .SUFFIXES:
6.181 -.SUFFIXES: .c .o .obj
6.182 +.SUFFIXES: .c .o .obj .s
6.183 $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
6.184 cd $(top_srcdir) && \
6.185 $(AUTOMAKE) --gnu src/Makefile
6.186 @@ -2041,6 +2079,24 @@
6.187 @AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/testxlt.Po' tmpdepfile='$(DEPDIR)/testxlt.TPo' @AMDEPBACKSLASH@
6.188 @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
6.189 @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o testxlt.obj `if test -f 'test/testxlt.c'; then $(CYGPATH_W) 'test/testxlt.c'; else $(CYGPATH_W) '$(srcdir)/test/testxlt.c'; fi`
6.190 +
6.191 +.s.o:
6.192 + $(CCASCOMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$<
6.193 +
6.194 +.s.obj:
6.195 + $(CCASCOMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`
6.196 +
6.197 +ia64asm.o: sh4/ia64asm.s
6.198 + $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o ia64asm.o `test -f 'sh4/ia64asm.s' || echo '$(srcdir)/'`sh4/ia64asm.s
6.199 +
6.200 +ia64asm.obj: sh4/ia64asm.s
6.201 + $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o ia64asm.obj `if test -f 'sh4/ia64asm.s'; then $(CYGPATH_W) 'sh4/ia64asm.s'; else $(CYGPATH_W) '$(srcdir)/sh4/ia64asm.s'; fi`
6.202 +
6.203 +ia32asm.o: sh4/ia32asm.s
6.204 + $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o ia32asm.o `test -f 'sh4/ia32asm.s' || echo '$(srcdir)/'`sh4/ia32asm.s
6.205 +
6.206 +ia32asm.obj: sh4/ia32asm.s
6.207 + $(CCAS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o ia32asm.obj `if test -f 'sh4/ia32asm.s'; then $(CYGPATH_W) 'sh4/ia32asm.s'; else $(CYGPATH_W) '$(srcdir)/sh4/ia32asm.s'; fi`
6.208 uninstall-info-am:
6.209
6.210 ETAGS = etags
7.1 --- a/src/dreamcast.h Sun Jan 06 12:24:18 2008 +0000
7.2 +++ b/src/dreamcast.h Thu Jan 10 08:28:37 2008 +0000
7.3 @@ -60,7 +60,7 @@
7.4 void dreamcast_program_loaded( const gchar *name, sh4addr_t entry_point );
7.5
7.6 #define DREAMCAST_SAVE_MAGIC "%!-lxDream!Save\0"
7.7 -#define DREAMCAST_SAVE_VERSION 0x00010002
7.8 +#define DREAMCAST_SAVE_VERSION 0x00010003
7.9
7.10 int dreamcast_save_state( const gchar *filename );
7.11 int dreamcast_load_state( const gchar *filename );
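The version bump invalidates save states written before the MMU work, since the serialized layout changed. A minimal sketch of the kind of check a loader performs against this constant (hypothetical helper, not the changeset's code):

    /* Hypothetical sketch: a loader rejects mismatched versions up
     * front, which is how bumping DREAMCAST_SAVE_VERSION from
     * 0x00010002 to 0x00010003 makes older states unloadable. */
    #include <stdio.h>
    #include <stdint.h>

    #define DREAMCAST_SAVE_VERSION 0x00010003

    static int check_save_version( FILE *f )
    {
        uint32_t version;
        if( fread( &version, sizeof(version), 1, f ) != 1 ) {
            return -1;                       /* truncated file */
        }
        return version == DREAMCAST_SAVE_VERSION ? 0 : -1;
    }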
8.1 --- a/src/sh4/ia32abi.h Sun Jan 06 12:24:18 2008 +0000
8.2 +++ b/src/sh4/ia32abi.h Thu Jan 10 08:28:37 2008 +0000
8.3 @@ -80,19 +80,16 @@
8.4 {
8.5 PUSH_r32(addr);
8.6 call_func0(sh4_read_long);
8.7 - POP_r32(addr);
8.8 + POP_r32(R_ECX);
8.9 PUSH_r32(R_EAX);
8.10 - ADD_imm8s_r32( 4, addr );
8.11 - PUSH_r32(addr);
8.12 + ADD_imm8s_r32( 4, R_ECX );
8.13 + PUSH_r32(R_ECX);
8.14 call_func0(sh4_read_long);
8.15 ADD_imm8s_r32( 4, R_ESP );
8.16 MOV_r32_r32( R_EAX, arg2b );
8.17 POP_r32(arg2a);
8.18 }
8.19
8.20 -#define EXIT_BLOCK_SIZE 29
8.21 -
8.22 -
8.23 /**
8.24 * Emit the 'start of block' assembly. Sets up the stack frame and save
8.25 * SI/DI as required
8.26 @@ -108,8 +105,9 @@
8.27 sh4_x86.fpuen_checked = FALSE;
8.28 sh4_x86.branch_taken = FALSE;
8.29 sh4_x86.backpatch_posn = 0;
8.30 + sh4_x86.recovery_posn = 0;
8.31 sh4_x86.block_start_pc = pc;
8.32 - sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
8.33 + sh4_x86.tlb_on = IS_MMU_ENABLED();
8.34 sh4_x86.tstate = TSTATE_NONE;
8.35 #ifdef STACK_ALIGN
8.36 sh4_x86.stack_posn = 8;
8.37 @@ -134,6 +132,9 @@
8.38 RET();
8.39 }
8.40
8.41 +#define EXIT_BLOCK_SIZE(pc) (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
8.42 +
8.43 +
8.44 /**
8.45 * Exit the block to an absolute PC
8.46 */
8.47 @@ -141,7 +142,37 @@
8.48 {
8.49 load_imm32( R_ECX, pc ); // 5
8.50 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
8.51 - MOV_moff32_EAX( xlat_get_lut_entry(pc) ); // 5
8.52 + if( IS_IN_ICACHE(pc) ) {
8.53 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
8.54 + } else if( sh4_x86.tlb_on ) {
8.55 + call_func1(xlat_get_code_by_vma,R_ECX);
8.56 + } else {
8.57 + call_func1(xlat_get_code,R_ECX);
8.58 + }
8.59 + AND_imm8s_r32( 0xFC, R_EAX ); // 3
8.60 + load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
8.61 + ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
8.62 + POP_r32(R_EBP);
8.63 + RET();
8.64 +}
8.65 +
8.66 +#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
8.67 +
8.68 +/**
8.69 + * Exit the block to a relative PC
8.70 + */
8.71 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
8.72 +{
8.73 + load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
8.74 + ADD_sh4r_r32( R_PC, R_ECX );
8.75 + store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
8.76 + if( IS_IN_ICACHE(pc) ) {
8.77 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
8.78 + } else if( sh4_x86.tlb_on ) {
8.79 + call_func1(xlat_get_code_by_vma,R_ECX);
8.80 + } else {
8.81 + call_func1(xlat_get_code,R_ECX);
8.82 + }
8.83 AND_imm8s_r32( 0xFC, R_EAX ); // 3
8.84 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
8.85 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
8.86 @@ -155,23 +186,22 @@
8.87 void sh4_translate_end_block( sh4addr_t pc ) {
8.88 if( sh4_x86.branch_taken == FALSE ) {
8.89 // Didn't exit unconditionally already, so write the termination here
8.90 - exit_block( pc, pc );
8.91 + exit_block_rel( pc, pc );
8.92 }
8.93 if( sh4_x86.backpatch_posn != 0 ) {
8.94 unsigned int i;
8.95 // Raise exception
8.96 uint8_t *end_ptr = xlat_output;
8.97 - load_spreg( R_ECX, REG_OFFSET(pc) );
8.98 + MOV_r32_r32( R_EDX, R_ECX );
8.99 ADD_r32_r32( R_EDX, R_ECX );
8.100 - ADD_r32_r32( R_EDX, R_ECX );
8.101 - store_spreg( R_ECX, REG_OFFSET(pc) );
8.102 + ADD_r32_sh4r( R_ECX, R_PC );
8.103 MOV_moff32_EAX( &sh4_cpu_period );
8.104 MUL_r32( R_EDX );
8.105 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
8.106
8.107 call_func0( sh4_raise_exception );
8.108 ADD_imm8s_r32( 4, R_ESP );
8.109 - load_spreg( R_EAX, REG_OFFSET(pc) );
8.110 + load_spreg( R_EAX, R_PC );
8.111 if( sh4_x86.tlb_on ) {
8.112 call_func1(xlat_get_code_by_vma,R_EAX);
8.113 } else {
8.114 @@ -182,14 +212,13 @@
8.115
8.116 // Exception already raised - just cleanup
8.117 uint8_t *preexc_ptr = xlat_output;
8.118 - load_imm32( R_ECX, sh4_x86.block_start_pc );
8.119 + MOV_r32_r32( R_EDX, R_ECX );
8.120 ADD_r32_r32( R_EDX, R_ECX );
8.121 - ADD_r32_r32( R_EDX, R_ECX );
8.122 - store_spreg( R_ECX, REG_OFFSET(spc) );
8.123 + ADD_r32_sh4r( R_ECX, R_SPC );
8.124 MOV_moff32_EAX( &sh4_cpu_period );
8.125 MUL_r32( R_EDX );
8.126 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
8.127 - load_spreg( R_EAX, REG_OFFSET(pc) );
8.128 + load_spreg( R_EAX, R_PC );
8.129 if( sh4_x86.tlb_on ) {
8.130 call_func1(xlat_get_code_by_vma,R_EAX);
8.131 } else {
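Both exit_block and the new exit_block_rel end with the same three-way dispatch to find the next code pointer. In plain C the emitted logic is roughly the following (a sketch under the diff's names; the real version is generated x86, and the extern signatures/types are assumptions):

    #include <stdint.h>

    extern void *xlat_get_code( uint32_t pc );          /* phys lookup */
    extern void *xlat_get_code_by_vma( uint32_t vma );  /* TLB lookup  */
    extern void **xlat_get_lut_entry( uint32_t phys );  /* LUT slot    */

    void *next_block_sketch( uint32_t pc, uint32_t phys,
                             int in_icache, int tlb_on )
    {
        void *code;
        if( in_icache ) {
            /* IS_IN_ICACHE(pc) is resolved at translation time, so the
             * emitted code is a single 5-byte load from the LUT slot
             * for GET_ICACHE_PHYS(pc). */
            code = *xlat_get_lut_entry( phys );
        } else if( tlb_on ) {
            code = xlat_get_code_by_vma( pc );  /* runtime TLB lookup */
        } else {
            code = xlat_get_code( pc );         /* physical lookup */
        }
        /* The emitted AND with 0xFC (sign-extended to ~3) clears the
         * low pointer bits, which apparently carry flags. */
        return (void *)((uintptr_t)code & ~(uintptr_t)3);
    }

This is also why EXIT_BLOCK_SIZE changes from a constant to a function of pc: the in-icache case emits a 5-byte move, while the fallback emits a CALL_FUNC1_SIZE call.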
9.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
9.2 +++ b/src/sh4/ia32asm.s Thu Jan 10 08:28:37 2008 +0000
9.3 @@ -0,0 +1,34 @@
9.4 +#
9.5 +# Scan back through the stack until we hit the currently executing
9.6 +# translation block, and find the call return address to that block.
9.7 +#
9.8 +# Implementation: iterate back through each stack frame until we find
9.9 +# a frame that has a saved %ebp == sh4r (setup by the xlat blocks).
9.10 +# The return address is then the stack value immediately before the
9.11 +# saved %ebp.
9.12 +#
9.13 +# At most 8 stack frames are checked, to prevent infinite looping on a
9.14 +# corrupt stack.
9.15 +
9.16 +.global xlat_get_native_pc
9.17 +xlat_get_native_pc:
9.18 + mov %ebp, %eax
9.19 + mov $0x8, %ecx
9.20 + mov $sh4r, %edx
9.21 +
9.22 +frame_loop:
9.23 + test %eax, %eax
9.24 + je frame_not_found
9.25 + cmp (%eax), %edx
9.26 + je frame_found
9.27 + sub $0x1, %ecx
9.28 + je frame_not_found
9.29 + movl (%eax), %eax
9.30 + jmp frame_loop
9.31 +
9.32 +frame_found:
9.33 + movl 0x4(%eax), %eax
9.34 + ret
9.35 +frame_not_found:
9.36 + xor %eax, %eax
9.37 + ret
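For reference, the same walk expressed in C (a sketch only - the real routine has to be assembly because it inspects its own caller's frames). It assumes GCC-style frame chains, where frame[0] holds the caller's saved %ebp and frame[1] the return address; translated blocks run with %ebp == &sh4r, so a frame whose saved %ebp equals &sh4r was pushed by a call out of a translated block:

    #include <stddef.h>

    void *xlat_get_native_pc_sketch( void **frame, void *sh4r_addr )
    {
        int depth;
        for( depth = 0; depth < 8 && frame != NULL; depth++ ) {
            if( frame[0] == sh4r_addr )
                return frame[1];           /* return address into block */
            frame = (void **)frame[0];     /* walk to the caller frame */
        }
        return NULL;    /* not called from translated code, or corrupt */
    }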
10.1 --- a/src/sh4/ia32mac.h Sun Jan 06 12:24:18 2008 +0000
10.2 +++ b/src/sh4/ia32mac.h Thu Jan 10 08:28:37 2008 +0000
10.3 @@ -101,11 +101,11 @@
10.4 PUSH_r32(addr);
10.5 load_imm32(R_EAX, (uint32_t)sh4_read_long);
10.6 CALL_r32(R_EAX);
10.7 - POP_r32(addr);
10.8 + POP_r32(R_ECX);
10.9 SUB_imm8s_r32( adj2-adj, R_ESP );
10.10 PUSH_r32(R_EAX);
10.11 - ADD_imm8s_r32( 4, addr );
10.12 - PUSH_r32(addr);
10.13 + ADD_imm8s_r32( 4, R_ECX );
10.14 + PUSH_r32(R_ECX);
10.15 load_imm32(R_EAX, (uint32_t)sh4_read_long);
10.16 CALL_r32(R_EAX);
10.17 ADD_imm8s_r32( 4, R_ESP );
10.18 @@ -115,9 +115,6 @@
10.19 sh4_x86.stack_posn -= 4;
10.20 }
10.21
10.22 -#define EXIT_BLOCK_SIZE 29
10.23 -
10.24 -
10.25 /**
10.26 * Emit the 'start of block' assembly. Sets up the stack frame and save
10.27 * SI/DI as required
10.28 @@ -133,9 +130,10 @@
10.29 sh4_x86.fpuen_checked = FALSE;
10.30 sh4_x86.branch_taken = FALSE;
10.31 sh4_x86.backpatch_posn = 0;
10.32 + sh4_x86.recovery_posn = 0;
10.33 sh4_x86.block_start_pc = pc;
10.34 sh4_x86.tstate = TSTATE_NONE;
10.35 - sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
10.36 + sh4_x86.tlb_on = IS_MMU_ENABLED();
10.37 sh4_x86.stack_posn = 8;
10.38 }
10.39
10.40 @@ -157,6 +155,9 @@
10.41 RET();
10.42 }
10.43
10.44 +#define EXIT_BLOCK_SIZE(pc) (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
10.45 +
10.46 +
10.47 /**
10.48 * Exit the block to an absolute PC
10.49 */
10.50 @@ -164,7 +165,37 @@
10.51 {
10.52 load_imm32( R_ECX, pc ); // 5
10.53 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
10.54 - MOV_moff32_EAX( xlat_get_lut_entry(pc) ); // 5
10.55 + if( IS_IN_ICACHE(pc) ) {
10.56 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
10.57 + } else if( sh4_x86.tlb_on ) {
10.58 + call_func1(xlat_get_code_by_vma,R_ECX);
10.59 + } else {
10.60 + call_func1(xlat_get_code,R_ECX);
10.61 + }
10.62 + AND_imm8s_r32( 0xFC, R_EAX ); // 3
10.63 + load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
10.64 + ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
10.65 + POP_r32(R_EBP);
10.66 + RET();
10.67 +}
10.68 +
10.69 +#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
10.70 +
10.71 +/**
10.72 + * Exit the block to a relative PC
10.73 + */
10.74 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
10.75 +{
10.76 + load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
10.77 + ADD_sh4r_r32( R_PC, R_ECX );
10.78 + store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
10.79 + if( IS_IN_ICACHE(pc) ) {
10.80 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
10.81 + } else if( sh4_x86.tlb_on ) {
10.82 + call_func1(xlat_get_code_by_vma,R_ECX);
10.83 + } else {
10.84 + call_func1(xlat_get_code,R_ECX);
10.85 + }
10.86 AND_imm8s_r32( 0xFC, R_EAX ); // 3
10.87 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
10.88 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
10.89 @@ -178,23 +209,22 @@
10.90 void sh4_translate_end_block( sh4addr_t pc ) {
10.91 if( sh4_x86.branch_taken == FALSE ) {
10.92 // Didn't exit unconditionally already, so write the termination here
10.93 - exit_block( pc, pc );
10.94 + exit_block_rel( pc, pc );
10.95 }
10.96 if( sh4_x86.backpatch_posn != 0 ) {
10.97 unsigned int i;
10.98 // Raise exception
10.99 uint8_t *end_ptr = xlat_output;
10.100 - load_spreg( R_ECX, REG_OFFSET(pc) );
10.101 + MOV_r32_r32( R_EDX, R_ECX );
10.102 ADD_r32_r32( R_EDX, R_ECX );
10.103 - ADD_r32_r32( R_EDX, R_ECX );
10.104 - store_spreg( R_ECX, REG_OFFSET(pc) );
10.105 + ADD_r32_sh4r( R_ECX, R_PC );
10.106 MOV_moff32_EAX( &sh4_cpu_period );
10.107 MUL_r32( R_EDX );
10.108 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
10.109
10.110 POP_r32(R_EDX);
10.111 call_func1( sh4_raise_exception, R_EDX );
10.112 - load_spreg( R_EAX, REG_OFFSET(pc) );
10.113 + load_spreg( R_EAX, R_PC );
10.114 if( sh4_x86.tlb_on ) {
10.115 call_func1(xlat_get_code_by_vma,R_EAX);
10.116 } else {
10.117 @@ -205,14 +235,13 @@
10.118
10.119 // Exception already raised - just cleanup
10.120 uint8_t *preexc_ptr = xlat_output;
10.121 - load_imm32( R_ECX, sh4_x86.block_start_pc );
10.122 + MOV_r32_r32( R_EDX, R_ECX );
10.123 ADD_r32_r32( R_EDX, R_ECX );
10.124 - ADD_r32_r32( R_EDX, R_ECX );
10.125 - store_spreg( R_ECX, REG_OFFSET(spc) );
10.126 + ADD_r32_sh4r( R_ECX, R_SPC );
10.127 MOV_moff32_EAX( &sh4_cpu_period );
10.128 MUL_r32( R_EDX );
10.129 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
10.130 - load_spreg( R_EAX, REG_OFFSET(pc) );
10.131 + load_spreg( R_EAX, R_PC );
10.132 if( sh4_x86.tlb_on ) {
10.133 call_func1(xlat_get_code_by_vma,R_EAX);
10.134 } else {
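Both ABI headers make the same fix to the double-word read: the saved address is popped into the fixed scratch register ECX instead of back into addr. Presumably this is because addr is caller-chosen and may alias EAX, which already holds the first sh4_read_long result at that point. The hazard in the old sequence, annotated with the diff's own emitter macros:

    PUSH_r32(addr);
    call_func0(sh4_read_long);   /* first word now in R_EAX */
    POP_r32(addr);               /* old code: clobbers R_EAX if addr==R_EAX */
    PUSH_r32(R_EAX);             /* ...so the pushed "result" is garbage */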
11.1 --- a/src/sh4/ia64abi.h Sun Jan 06 12:24:18 2008 +0000
11.2 +++ b/src/sh4/ia64abi.h Thu Jan 10 08:28:37 2008 +0000
11.3 @@ -50,7 +50,7 @@
11.4 call_func0(ptr);
11.5 }
11.6
11.7 -#define MEM_WRITE_DOUBLE_SIZE 39
11.8 +#define MEM_WRITE_DOUBLE_SIZE 35
11.9 /**
11.10 * Write a double (64-bit) value into memory, with the first word in arg2a, and
11.11 * the second in arg2b
11.12 @@ -60,10 +60,10 @@
11.13 PUSH_r32(arg2b);
11.14 PUSH_r32(addr);
11.15 call_func2(sh4_write_long, addr, arg2a);
11.16 - POP_r32(addr);
11.17 - POP_r32(arg2b);
11.18 - ADD_imm8s_r32(4, addr);
11.19 - call_func2(sh4_write_long, addr, arg2b);
11.20 + POP_r32(R_EDI);
11.21 + POP_r32(R_ESI);
11.22 + ADD_imm8s_r32(4, R_EDI);
11.23 + call_func0(sh4_write_long);
11.24 }
11.25
11.26 #define MEM_READ_DOUBLE_SIZE 43
11.27 @@ -101,8 +101,9 @@
11.28 sh4_x86.fpuen_checked = FALSE;
11.29 sh4_x86.branch_taken = FALSE;
11.30 sh4_x86.backpatch_posn = 0;
11.31 + sh4_x86.recovery_posn = 0;
11.32 sh4_x86.block_start_pc = pc;
11.33 - sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
11.34 + sh4_x86.tlb_on = IS_MMU_ENABLED();
11.35 sh4_x86.tstate = TSTATE_NONE;
11.36 }
11.37
11.38 @@ -124,7 +125,7 @@
11.39 RET();
11.40 }
11.41
11.42 -#define EXIT_BLOCK_SIZE 35
11.43 +#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
11.44 /**
11.45 * Exit the block to an absolute PC
11.46 */
11.47 @@ -132,8 +133,39 @@
11.48 {
11.49 load_imm32( R_ECX, pc ); // 5
11.50 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
11.51 - REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
11.52 - REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 3
11.53 + if( IS_IN_ICACHE(pc) ) {
11.54 + REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
11.55 + } else if( sh4_x86.tlb_on ) {
11.56 + call_func1(xlat_get_code_by_vma, R_ECX);
11.57 + } else {
11.58 + call_func1(xlat_get_code,R_ECX);
11.59 + }
11.60 + REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
11.61 + load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
11.62 + ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
11.63 + POP_r32(R_EBP);
11.64 + RET();
11.65 +}
11.66 +
11.67 +
11.68 +#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
11.69 +
11.70 +/**
11.71 + * Exit the block to a relative PC
11.72 + */
11.73 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
11.74 +{
11.75 + load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
11.76 + ADD_sh4r_r32( R_PC, R_ECX );
11.77 + store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
11.78 + if( IS_IN_ICACHE(pc) ) {
11.79 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
11.80 + } else if( sh4_x86.tlb_on ) {
11.81 + call_func1(xlat_get_code_by_vma,R_ECX);
11.82 + } else {
11.83 + call_func1(xlat_get_code,R_ECX);
11.84 + }
11.85 + REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
11.86 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
11.87 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
11.88 POP_r32(R_EBP);
11.89 @@ -146,22 +178,21 @@
11.90 void sh4_translate_end_block( sh4addr_t pc ) {
11.91 if( sh4_x86.branch_taken == FALSE ) {
11.92 // Didn't exit unconditionally already, so write the termination here
11.93 - exit_block( pc, pc );
11.94 + exit_block_rel( pc, pc );
11.95 }
11.96 if( sh4_x86.backpatch_posn != 0 ) {
11.97 unsigned int i;
11.98 // Raise exception
11.99 uint8_t *end_ptr = xlat_output;
11.100 - load_spreg( R_ECX, REG_OFFSET(pc) );
11.101 + MOV_r32_r32( R_EDX, R_ECX );
11.102 ADD_r32_r32( R_EDX, R_ECX );
11.103 - ADD_r32_r32( R_EDX, R_ECX );
11.104 - store_spreg( R_ECX, REG_OFFSET(pc) );
11.105 + ADD_r32_sh4r( R_ECX, R_PC );
11.106 MOV_moff32_EAX( &sh4_cpu_period );
11.107 MUL_r32( R_EDX );
11.108 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
11.109
11.110 call_func0( sh4_raise_exception );
11.111 - load_spreg( R_EAX, REG_OFFSET(pc) );
11.112 + load_spreg( R_EAX, R_PC );
11.113 if( sh4_x86.tlb_on ) {
11.114 call_func1(xlat_get_code_by_vma,R_EAX);
11.115 } else {
11.116 @@ -172,18 +203,17 @@
11.117
11.118 // Exception already raised - just cleanup
11.119 uint8_t *preexc_ptr = xlat_output;
11.120 - load_imm32( R_ECX, sh4_x86.block_start_pc );
11.121 + MOV_r32_r32( R_EDX, R_ECX );
11.122 ADD_r32_r32( R_EDX, R_ECX );
11.123 - ADD_r32_r32( R_EDX, R_ECX );
11.124 - store_spreg( R_ECX, REG_OFFSET(spc) );
11.125 + ADD_r32_sh4r( R_ECX, R_SPC );
11.126 MOV_moff32_EAX( &sh4_cpu_period );
11.127 MUL_r32( R_EDX );
11.128 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
11.129 - load_spreg( R_EAX, REG_OFFSET(pc) );
11.130 + load_spreg( R_EDI, R_PC );
11.131 if( sh4_x86.tlb_on ) {
11.132 - call_func1(xlat_get_code_by_vma,R_EAX);
11.133 + call_func0(xlat_get_code_by_vma);
11.134 } else {
11.135 - call_func1(xlat_get_code,R_EAX);
11.136 + call_func0(xlat_get_code);
11.137 }
11.138 POP_r32(R_EBP);
11.139 RET();
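The 64-bit MEM_WRITE_DOUBLE change exploits the AMD64 calling convention: the first two integer arguments travel in RDI and RSI, so popping the saved address and value straight into those registers leaves the second call needing no argument setup (which is also why the size constant drops from 39 to 35). Annotated from the diff (R_EDI/R_ESI name the low halves of RDI/RSI):

    POP_r32(R_EDI);               /* saved addr -> 1st argument register */
    POP_r32(R_ESI);               /* saved arg2b -> 2nd argument register */
    ADD_imm8s_r32(4, R_EDI);      /* second word goes to addr+4 */
    call_func0(sh4_write_long);   /* arguments already in place */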
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
12.2 +++ b/src/sh4/ia64asm.s Thu Jan 10 08:28:37 2008 +0000
12.3 @@ -0,0 +1,34 @@
12.4 +#
12.5 +# Scan back through the stack until we hit the currently executing
12.6 +# translation block, and find the call return address to that block.
12.7 +#
12.8 +# Implementation: iterate back through each stack frame until we find
12.9 +# a frame that has a saved %ebp == sh4r (setup by the xlat blocks).
12.10 +# The return address is then the stack value immediately before the
12.11 +# saved %ebp.
12.12 +#
12.13 +# At most 8 stack frames are checked, to prevent infinite looping on a
12.14 +# corrupt stack.
12.15 +
12.16 +.global xlat_get_native_pc
12.17 +xlat_get_native_pc:
12.18 + mov %rbp, %rax
12.19 + mov $0x8, %ecx
12.20 + mov $sh4r, %rdx
12.21 +
12.22 +frame_loop:
12.23 + test %rax, %rax
12.24 + je frame_not_found
12.25 + cmpq (%rax), %rdx
12.26 + je frame_found
12.27 + sub $0x1, %ecx
12.28 + je frame_not_found
12.29 + movq (%rax), %rax
12.30 + jmp frame_loop
12.31 +
12.32 +frame_found:
12.33 + movl 0x4(%rax), %rax
12.34 + ret
12.35 +frame_not_found:
12.36 + xor %rax, %rax
12.37 + ret
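The 64-bit walk mirrors the 32-bit one, but note that x86-64 stack slots are 8 bytes: the return address sits at 8(%rbp), and gas rejects `movl` with %rax as destination, so the `movl 0x4(%rax), %rax` in frame_found reads like a leftover from the 32-bit version (consistent with this branch being flagged work-in-progress). The frame layout the walk depends on, as a sketch:

    /* 64-bit frame layout assumed by the walk (sketch): */
    struct frame64 {
        struct frame64 *saved_rbp;   /* at 0(%rbp), 8 bytes wide */
        void           *return_addr; /* at 8(%rbp) - not 4(%rbp) */
    };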
13.1 --- a/src/sh4/mmu.c Sun Jan 06 12:24:18 2008 +0000
13.2 +++ b/src/sh4/mmu.c Thu Jan 10 08:28:37 2008 +0000
13.3 @@ -158,11 +158,12 @@
13.4 mmu_lrui = (val >> 26) & 0x3F;
13.5 val &= 0x00000301;
13.6 tmp = MMIO_READ( MMU, MMUCR );
13.7 - if( ((val ^ tmp) & MMUCR_AT) ) {
13.8 + if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
13.9 // AT flag has changed state - flush the xlt cache as all bets
13.10 // are off now. We also need to force an immediate exit from the
13.11 // current block
13.12 - xlat_flush_cache();
13.13 + MMIO_WRITE( MMU, MMUCR, val );
13.14 + sh4_translate_flush_cache();
13.15 }
13.16 break;
13.17 case CCR:
13.18 @@ -221,7 +222,7 @@
13.19 return 1;
13.20 }
13.21 if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
13.22 - return 1;
13.23 + return 1;
13.24 }
13.25 return 0;
13.26 }
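Ordering matters in the MMUCR change above: sh4_translate_flush_cache() can longjmp back into the run-slice loop and never return, so the new register value has to be committed before the flush is invoked. A hypothetical condensation of the AT-toggle path:

    if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
        MMIO_WRITE( MMU, MMUCR, val );  /* commit the new value first... */
        sh4_translate_flush_cache();    /* ...because this may longjmp
                                           away and never return */
    }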
14.1 --- a/src/sh4/sh4.c Sun Jan 06 12:24:18 2008 +0000
14.2 +++ b/src/sh4/sh4.c Thu Jan 10 08:28:37 2008 +0000
14.3 @@ -67,6 +67,11 @@
14.4 #endif
14.5 }
14.6
14.7 +gboolean sh4_is_using_xlat()
14.8 +{
14.9 + return sh4_use_translator;
14.10 +}
14.11 +
14.12 void sh4_init(void)
14.13 {
14.14 register_io_regions( mmio_list_sh4mmio );
14.15 @@ -200,7 +205,9 @@
14.16
14.17 void sh4_write_sr( uint32_t newval )
14.18 {
14.19 - if( (newval ^ sh4r.sr) & SR_RB )
14.20 + int oldbank = (sh4r.sr&SR_MDRB) == SR_MDRB;
14.21 + int newbank = (newval&SR_MDRB) == SR_MDRB;
14.22 + if( oldbank != newbank )
14.23 sh4_switch_banks();
14.24 sh4r.sr = newval;
14.25 sh4r.t = (newval&SR_T) ? 1 : 0;
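The sh4_write_sr change fixes the bank-switch condition: the SH4 runs on register bank 1 only when both MD and RB are set (user mode always sees bank 0), so testing the RB bit alone would swap banks spuriously whenever RB toggled in user mode. A sketch of the predicate and its truth table:

    /* Which bank is live, as a function of SR (SR_MDRB = SR_MD|SR_RB): */
    static int active_bank( uint32_t sr )
    {
        return (sr & SR_MDRB) == SR_MDRB;   /* bank 1 iff MD=1 and RB=1 */
    }
    /* MD=0,RB=0 -> 0   MD=0,RB=1 -> 0   MD=1,RB=0 -> 0   MD=1,RB=1 -> 1
     * so an RB toggle with MD=0 must not call sh4_switch_banks(), which
     * the old (newval ^ sh4r.sr) & SR_RB test would have done. */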
15.1 --- a/src/sh4/sh4.h Sun Jan 06 12:24:18 2008 +0000
15.2 +++ b/src/sh4/sh4.h Thu Jan 10 08:28:37 2008 +0000
15.3 @@ -97,6 +97,11 @@
15.4 void sh4_set_use_xlat( gboolean use );
15.5
15.6 /**
15.7 + * Test if system is currently using the translation engine.
15.8 + */
15.9 +gboolean sh4_is_using_xlat();
15.10 +
15.11 +/**
15.12 * Explicitly set the SH4 PC to the supplied value - this will be the next
15.13 * instruction executed. This should only be called while the system is stopped.
15.14 */
16.1 --- a/src/sh4/sh4core.h Sun Jan 06 12:24:18 2008 +0000
16.2 +++ b/src/sh4/sh4core.h Thu Jan 10 08:28:37 2008 +0000
16.3 @@ -185,6 +185,7 @@
16.4 #define SR_T 0x00000001 /* True/false or carry/borrow */
16.5 #define SR_MASK 0x700083F3
16.6 #define SR_MQSTMASK 0xFFFFFCFC /* Mask to clear the flags we're keeping separately */
16.7 +#define SR_MDRB 0x60000000 /* MD+RB mask for convenience */
16.8
16.9 #define IS_SH4_PRIVMODE() (sh4r.sr&SR_MD)
16.10 #define SH4_INTMASK() ((sh4r.sr&SR_IMASK)>>4)
17.1 --- a/src/sh4/sh4mem.c Sun Jan 06 12:24:18 2008 +0000
17.2 +++ b/src/sh4/sh4mem.c Thu Jan 10 08:28:37 2008 +0000
17.3 @@ -426,6 +426,9 @@
17.4 void sh4_flush_store_queue( sh4addr_t addr )
17.5 {
17.6 /* Store queue operation */
17.7 + if( IS_MMU_ENABLED() ) {
17.8 +
17.9 + }
17.10 int queue = (addr&0x20)>>2;
17.11 sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
17.12 uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
18.1 --- a/src/sh4/sh4trans.c Sun Jan 06 12:24:18 2008 +0000
18.2 +++ b/src/sh4/sh4trans.c Thu Jan 10 08:28:37 2008 +0000
18.3 @@ -17,15 +17,16 @@
18.4 * GNU General Public License for more details.
18.5 */
18.6 #include <assert.h>
18.7 +#include <setjmp.h>
18.8 #include "eventq.h"
18.9 #include "syscall.h"
18.10 +#include "clock.h"
18.11 #include "sh4/sh4core.h"
18.12 #include "sh4/sh4trans.h"
18.13 #include "sh4/xltcache.h"
18.14
18.15
18.16 -uint32_t last_pc;
18.17 -void *last_code;
18.18 +static jmp_buf xlat_jmp_buf;
18.19 /**
18.20 * Execute a timeslice using translated code only (ie translate/execute loop)
18.21 * Note this version does not support breakpoints
18.22 @@ -41,6 +42,8 @@
18.23 }
18.24 }
18.25
18.26 + int jmp = setjmp(xlat_jmp_buf);
18.27 +
18.28 void * (*code)() = NULL;
18.29 while( sh4r.slice_cycle < nanosecs ) {
18.30 if( sh4r.event_pending <= sh4r.slice_cycle ) {
18.31 @@ -66,8 +69,6 @@
18.32 code = sh4_translate_basic_block( sh4r.pc );
18.33 }
18.34 }
18.35 - last_pc = sh4r.pc;
18.36 - last_code = code;
18.37 code = code();
18.38 }
18.39
18.40 @@ -79,6 +80,8 @@
18.41 }
18.42
18.43 uint8_t *xlat_output;
18.44 +struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
18.45 +uint32_t xlat_recovery_posn;
18.46
18.47 /**
18.48 * Translate a linear basic block, ie all instructions from the start address
18.49 @@ -94,6 +97,7 @@
18.50 int done;
18.51 xlat_cache_block_t block = xlat_start_block( start );
18.52 xlat_output = (uint8_t *)block->code;
18.53 + xlat_recovery_posn = 0;
18.54 uint8_t *eob = xlat_output + block->size;
18.55 sh4_translate_begin_block(pc);
18.56
18.57 @@ -118,7 +122,19 @@
18.58 xlat_output = block->code + (xlat_output - oldstart);
18.59 }
18.60 sh4_translate_end_block(pc);
18.61 - xlat_commit_block( xlat_output - block->code, pc-start );
18.62 +
18.63 + /* Write the recovery records onto the end of the code block */
18.64 + uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
18.65 + uint32_t finalsize = xlat_output - block->code + recovery_size;
18.66 + if( finalsize > block->size ) {
18.67 + uint8_t *oldstart = block->code;
18.68 + block = xlat_extend_block( finalsize );
18.69 + xlat_output = block->code + (xlat_output - oldstart);
18.70 + }
18.71 + memcpy( xlat_output, xlat_recovery, recovery_size);
18.72 + block->recover_table = (xlat_recovery_record_t)xlat_output;
18.73 + block->recover_table_size = xlat_recovery_posn;
18.74 + xlat_commit_block( finalsize, pc-start );
18.75 return block->code;
18.76 }
18.77
18.78 @@ -147,3 +163,89 @@
18.79 void * (*code)() = (void *)buf;
18.80 return code();
18.81 }
18.82 +
18.83 +/**
18.84 + * "Execute" the supplied recovery record. Currently this only updates
18.85 + * sh4r.pc and sh4r.slice_cycle according to the currently executing
18.86 + * instruction. In future this may be more sophisticated (ie will
18.87 + * call into generated code).
18.88 + */
18.89 +void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
18.90 +{
18.91 + sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
18.92 + sh4r.pc += (recovery->sh4_icount<<1);
18.93 +}
18.94 +
18.95 +void sh4_translate_unwind_stack( gboolean abort_after, unwind_thunk_t thunk )
18.96 +{
18.97 + void *pc = xlat_get_native_pc();
18.98 + if( pc == NULL ) {
18.99 + // This should never happen - indicative of a bug somewhere.
18.100 + FATAL("Attempted to unwind stack, but translator is not running or stack is corrupt");
18.101 + }
18.102 + void *code = xlat_get_code( sh4r.pc );
18.103 + xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
18.104 + if( recover != NULL ) {
18.105 + // Can be null if there is no recovery necessary
18.106 + sh4_translate_run_recovery(recover);
18.107 + }
18.108 + if( thunk != NULL ) {
18.109 + thunk();
18.110 + }
18.111 + // finally longjmp back into sh4_xlat_run_slice
18.112 + longjmp(xlat_jmp_buf, 1);
18.113 +}
18.114 +
18.115 +/**
18.116 + * Exit the current block at the end of the current instruction, flush the
18.117 + * translation cache (completely) and return control to sh4_xlat_run_slice.
18.118 + *
18.119 + * As a special case, if the current instruction is actually the last
18.120 + * instruction in the block (ie it's in a delay slot), this function
18.121 + * returns to allow normal completion of the translation block. Otherwise
18.122 + * this function never returns.
18.123 + *
18.124 + * Must only be invoked (indirectly) from within translated code.
18.125 + */
18.126 +void sh4_translate_flush_cache()
18.127 +{
18.128 + void *pc = xlat_get_native_pc();
18.129 + if( pc == NULL ) {
18.130 + // This should never happen - indicative of a bug somewhere.
18.131 + FATAL("Attempted to unwind stack, but translator is not running or stack is corrupt");
18.132 + }
18.133 + void *code = xlat_get_code( sh4r.pc );
18.134 + xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
18.135 + if( recover != NULL ) {
18.136 + // Can be null if there is no recovery necessary
18.137 + sh4_translate_run_recovery(recover);
18.138 + xlat_flush_cache();
18.139 + longjmp(xlat_jmp_buf, 1);
18.140 + } else {
18.141 + xlat_flush_cache();
18.142 + return;
18.143 + }
18.144 +}
18.145 +
18.146 +void *xlat_get_code_by_vma( sh4vma_t vma )
18.147 +{
18.148 + void *result = NULL;
18.149 +
18.150 + if( !IS_IN_ICACHE(vma) ) {
18.151 + if( !mmu_update_icache(sh4r.pc) ) {
18.152 + // fault - off to the fault handler
18.153 + if( !mmu_update_icache(sh4r.pc) ) {
18.154 + // double fault - halt
18.155 + dreamcast_stop();
18.156 + ERROR( "Double fault - halting" );
18.157 + return NULL;
18.158 + }
18.159 + }
18.160 + }
18.161 + if( sh4_icache.page_vma != -1 ) {
18.162 + result = xlat_get_code( GET_ICACHE_PHYS(vma) );
18.163 + }
18.164 +
18.165 + return result;
18.166 +}
18.167 +
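The recovery table is the centrepiece of this changeset: records accumulate one per instruction during translation (see sh4_x86_add_recovery in the sh4x86.c hunk below) and are copied onto the end of the code block at commit time. When an MMU exception surfaces inside generated code, the native PC found by xlat_get_native_pc selects a record, pc and slice_cycle are patched up, and a longjmp returns to the setjmp at the top of sh4_xlat_run_slice. A condensed sketch - the struct layout is inferred from the two fields the diff uses; the real definition lives in xltcache.h, which this page doesn't show:

    #include <stdint.h>

    struct xlat_recovery_record {
        uintptr_t xlat_pc;      /* address within the generated code */
        uint32_t  sh4_icount;   /* SH4 instructions completed before it */
    };

    /* Equivalent of sh4_translate_run_recovery: credit the cycles for
     * the completed instructions and advance pc past them. */
    void run_recovery_sketch( struct xlat_recovery_record *r,
                              uint32_t *pc, uint32_t *slice_cycle,
                              uint32_t cpu_period )
    {
        *slice_cycle += r->sh4_icount * cpu_period;
        *pc          += r->sh4_icount << 1;   /* SH4 insns are 2 bytes */
    }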
19.1 --- a/src/sh4/sh4trans.h Sun Jan 06 12:24:18 2008 +0000
19.2 +++ b/src/sh4/sh4trans.h Thu Jan 10 08:28:37 2008 +0000
19.3 @@ -16,6 +16,7 @@
19.4 * GNU General Public License for more details.
19.5 */
19.6
19.7 +#include "sh4/xltcache.h"
19.8 #include "dream.h"
19.9 #include "mem.h"
19.10
19.11 @@ -27,8 +28,13 @@
19.12 * allows a little room
19.13 */
19.14 #define EPILOGUE_SIZE 128
19.15 +
19.16 +/** Maximum number of recovery records for a translated block (2048 based on
19.17 + * 1 record per SH4 instruction in a 4K page).
19.18 + */
19.19 +#define MAX_RECOVERY_SIZE 2048
19.20 +
19.21 /**
19.22 -
19.23 */
19.24 uint32_t sh4_xlat_run_slice( uint32_t nanosecs );
19.25
19.26 @@ -38,7 +44,10 @@
19.27 */
19.28 void *sh4_translate_basic_block( sh4addr_t start );
19.29
19.30 +
19.31 extern uint8_t *xlat_output;
19.32 +extern struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
19.33 +extern uint32_t xlat_recovery_posn;
19.34
19.35 /******************************************************************************
19.36 * Code generation - these methods must be provided by the
19.37 @@ -51,3 +60,22 @@
19.38 void sh4_translate_begin_block( sh4addr_t pc );
19.39 uint32_t sh4_translate_instruction( sh4addr_t pc );
19.40 void sh4_translate_end_block( sh4addr_t pc );
19.41 +
19.42 +typedef void (*unwind_thunk_t)(void);
19.43 +
19.44 +/**
19.45 + * From within the translator, (typically called from MMU exception handling routines)
19.46 + * immediately exit the current translation block (performing cleanup as necessary) and
19.47 + * return to sh4_xlat_run_slice(). Effectively a fast longjmp w/ xlat recovery.
19.48 + *
19.49 + * Note: The correct working of this method depends on the translator anticipating the
19.50 + * exception and generating the appropriate recovery block(s) - currently this means
19.51 + * that it should ONLY be called from within the context of a memory read or write.
19.52 + *
19.53 + * @param is_completion If TRUE, exit after completing the current instruction (effectively),
19.54 + * otherwise abort the current instruction with no effect.
19.55 + * @param thunk A function to execute after perform xlat recovery, but before returning
19.56 + * to run_slice. If NULL, control returns directly.
19.57 + * @return This method never returns.
19.58 + */
19.59 +void sh4_translate_unwind_stack( gboolean is_completion, unwind_thunk_t thunk );
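A hedged usage sketch of the new API (the fault handler and thunk below are hypothetical; only the sh4_translate_unwind_stack signature comes from this header):

    /* Hypothetical caller: an MMU fault discovered inside a memory
     * handler that was invoked from translated code. */
    static void deliver_fault( void )
    {
        /* raise the SH4 exception here, after recovery has fixed pc */
    }

    static void on_mmu_fault( void )
    {
        /* runs recovery for the current instruction, calls the thunk,
         * then longjmps back to sh4_xlat_run_slice - never returns */
        sh4_translate_unwind_stack( FALSE, deliver_fault );
    }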
20.1 --- a/src/sh4/sh4x86.c Sun Jan 06 12:24:18 2008 +0000
20.2 +++ b/src/sh4/sh4x86.c Thu Jan 10 08:28:37 2008 +0000
20.3 @@ -40,6 +40,8 @@
20.4 uint32_t exc_code;
20.5 };
20.6
20.7 +#define MAX_RECOVERY_SIZE 2048
20.8 +
20.9 /**
20.10 * Struct to manage internal translation state. This state is not saved -
20.11 * it is only valid between calls to sh4_translate_begin_block() and
20.12 @@ -61,6 +63,8 @@
20.13 struct backpatch_record *backpatch_list;
20.14 uint32_t backpatch_posn;
20.15 uint32_t backpatch_size;
20.16 + struct xlat_recovery_record recovery_list[MAX_RECOVERY_SIZE];
20.17 + uint32_t recovery_posn;
20.18 };
20.19
20.20 #define TSTATE_NONE -1
20.21 @@ -115,6 +119,13 @@
20.22 sh4_x86.backpatch_posn++;
20.23 }
20.24
20.25 +void sh4_x86_add_recovery( uint32_t pc )
20.26 +{
20.27 + xlat_recovery[xlat_recovery_posn].xlat_pc = (uintptr_t)xlat_output;
20.28 + xlat_recovery[xlat_recovery_posn].sh4_icount = (pc - sh4_x86.block_start_pc)>>1;
20.29 + xlat_recovery_posn++;
20.30 +}
20.31 +
20.32 /**
20.33 * Emit an instruction to load an SH4 reg into a real register
20.34 */
20.35 @@ -309,34 +320,27 @@
20.36
20.37 #define UNDEF()
20.38 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
20.39 -#define MEM_READ_BYTE_PHYS( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
20.40 -#define MEM_READ_WORD_PHYS( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
20.41 -#define MEM_READ_LONG_PHYS( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
20.42 -#define MEM_WRITE_BYTE_PHYS( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
20.43 -#define MEM_WRITE_WORD_PHYS( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
20.44 -#define MEM_WRITE_LONG_PHYS( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
20.45 +#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
20.46 +#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
20.47 +#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
20.48 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
20.49 +#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
20.50 +#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
20.51
20.52 -#define MEM_READ_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_byte, R_EAX); MEM_RESULT(value_reg)
20.53 -#define MEM_READ_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_word, R_EAX); MEM_RESULT(value_reg)
20.54 -#define MEM_READ_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_long, R_EAX); MEM_RESULT(value_reg)
20.55 -#define MEM_WRITE_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_byte, R_EAX, value_reg)
20.56 -#define MEM_WRITE_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_word, R_EAX, value_reg)
20.57 -#define MEM_WRITE_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_long, R_EAX, value_reg)
20.58 +/**
20.59 + * Perform MMU translation on the address in addr_reg for a read operation, if the TLB is
20.60 + * enabled; otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
20.61 + */
20.62 +#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
20.63 +/**
20.64 + * Perform MMU translation on the address in addr_reg for a write operation, if the TLB is
20.65 + * enabled; otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
20.66 + */
20.67 +#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
20.68
20.69 -#define MEM_READ_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_BYTE_VMA(addr_reg,value_reg);}else{MEM_READ_BYTE_PHYS(addr_reg, value_reg);}
20.70 -#define MEM_READ_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_WORD_VMA(addr_reg,value_reg);}else{MEM_READ_WORD_PHYS(addr_reg, value_reg);}
20.71 -#define MEM_READ_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_LONG_VMA(addr_reg,value_reg);}else{MEM_READ_LONG_PHYS(addr_reg, value_reg);}
20.72 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_BYTE_VMA(addr_reg,value_reg);}else{MEM_WRITE_BYTE_PHYS(addr_reg, value_reg);}
20.73 -#define MEM_WRITE_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_WORD_VMA(addr_reg,value_reg);}else{MEM_WRITE_WORD_PHYS(addr_reg, value_reg);}
20.74 -#define MEM_WRITE_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_LONG_VMA(addr_reg,value_reg);}else{MEM_WRITE_LONG_PHYS(addr_reg, value_reg);}
20.75 -
20.76 -#define MEM_READ_SIZE_PHYS (CALL_FUNC1_SIZE)
20.77 -#define MEM_WRITE_SIZE_PHYS (CALL_FUNC2_SIZE)
20.78 -#define MEM_READ_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC1_SIZE + 12)
20.79 -#define MEM_WRITE_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC2_SIZE + 12)
20.80 -
20.81 -#define MEM_READ_SIZE (sh4_x86.tlb_on?MEM_READ_SIZE_VMA:MEM_READ_SIZE_PHYS)
20.82 -#define MEM_WRITE_SIZE (sh4_x86.tlb_on?MEM_WRITE_SIZE_VMA:MEM_WRITE_SIZE_PHYS)
20.83 +#define MEM_READ_SIZE (CALL_FUNC1_SIZE)
20.84 +#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE)
20.85 +#define MMU_TRANSLATE_SIZE (sh4_x86.tlb_on ? (CALL_FUNC1_SIZE + 12) : 0 )
20.86
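The net effect of this refactoring: the MEM_* macros now always take a pre-translated (physical) address, and the TLB lookup is hoisted into a separate step that is emitted only when the TLB is on. In plain C the emitted read path behaves roughly like the sketch below; raise_tlb_exception() is a stand-in for the JE_exc(-1) exception exit, not a function in the source:

    /* Runtime behaviour of MMU_TRANSLATE_READ followed by MEM_READ_LONG. */
    uint32_t emulated_read_long( sh4vma_t vma )
    {
        uint32_t addr = vma;
        if( sh4_x86.tlb_on ) {                   /* decided once, at translation time */
            addr = mmu_vma_to_phys_read( vma );
            if( addr == MMU_VMA_ERROR )
                raise_tlb_exception();           /* stand-in for JE_exc(-1) */
        }
        return sh4_read_long( addr );            /* address is physical by now */
    }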
20.87 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
20.88
20.89 @@ -369,6 +373,9 @@
20.90 } else {
20.91 ir = sh4_read_word(pc);
20.92 }
20.93 + if( !sh4_x86.in_delay_slot ) {
20.94 + sh4_x86_add_recovery(pc);
20.95 + }
20.96 switch( (ir&0xF000) >> 12 ) {
20.97 case 0x0:
20.98 switch( ir&0xF ) {
20.99 @@ -505,10 +512,11 @@
20.100 case 0xC:
20.101 { /* MOVCA.L R0, @Rn */
20.102 uint32_t Rn = ((ir>>8)&0xF);
20.103 - load_reg( R_EAX, 0 );
20.104 - load_reg( R_ECX, Rn );
20.105 - check_walign32( R_ECX );
20.106 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.107 + load_reg( R_EAX, Rn );
20.108 + check_walign32( R_EAX );
20.109 + MMU_TRANSLATE_WRITE( R_EAX );
20.110 + load_reg( R_EDX, 0 );
20.111 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.112 sh4_x86.tstate = TSTATE_NONE;
20.113 }
20.114 break;
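The MOVCA.L rewrite shows the store pattern repeated throughout this changeset: build the target address in EAX, alignment-check it, translate it (translation clobbers EAX, ECX and EDX), and only then load the operand into EDX. A rough C equivalent, with the alignment check elided:

    /* Sketch of the emitted MOVCA.L R0, @Rn sequence. */
    uint32_t addr = sh4r.r[Rn];                   /* address first... */
    if( sh4_x86.tlb_on )
        addr = mmu_vma_to_phys_write( addr );     /* may raise a TLB exception */
    sh4_write_long( addr, sh4r.r[0] );            /* ...operand fetched after translation */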
20.115 @@ -522,9 +530,10 @@
20.116 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.117 load_reg( R_EAX, 0 );
20.118 load_reg( R_ECX, Rn );
20.119 - ADD_r32_r32( R_EAX, R_ECX );
20.120 - load_reg( R_EAX, Rm );
20.121 - MEM_WRITE_BYTE( R_ECX, R_EAX );
20.122 + ADD_r32_r32( R_ECX, R_EAX );
20.123 + MMU_TRANSLATE_WRITE( R_EAX );
20.124 + load_reg( R_EDX, Rm );
20.125 + MEM_WRITE_BYTE( R_EAX, R_EDX );
20.126 sh4_x86.tstate = TSTATE_NONE;
20.127 }
20.128 break;
20.129 @@ -533,10 +542,11 @@
20.130 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.131 load_reg( R_EAX, 0 );
20.132 load_reg( R_ECX, Rn );
20.133 - ADD_r32_r32( R_EAX, R_ECX );
20.134 - check_walign16( R_ECX );
20.135 - load_reg( R_EAX, Rm );
20.136 - MEM_WRITE_WORD( R_ECX, R_EAX );
20.137 + ADD_r32_r32( R_ECX, R_EAX );
20.138 + check_walign16( R_EAX );
20.139 + MMU_TRANSLATE_WRITE( R_EAX );
20.140 + load_reg( R_EDX, Rm );
20.141 + MEM_WRITE_WORD( R_EAX, R_EDX );
20.142 sh4_x86.tstate = TSTATE_NONE;
20.143 }
20.144 break;
20.145 @@ -545,10 +555,11 @@
20.146 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.147 load_reg( R_EAX, 0 );
20.148 load_reg( R_ECX, Rn );
20.149 - ADD_r32_r32( R_EAX, R_ECX );
20.150 - check_walign32( R_ECX );
20.151 - load_reg( R_EAX, Rm );
20.152 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.153 + ADD_r32_r32( R_ECX, R_EAX );
20.154 + check_walign32( R_EAX );
20.155 + MMU_TRANSLATE_WRITE( R_EAX );
20.156 + load_reg( R_EDX, Rm );
20.157 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.158 sh4_x86.tstate = TSTATE_NONE;
20.159 }
20.160 break;
20.161 @@ -755,8 +766,9 @@
20.162 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.163 load_reg( R_EAX, 0 );
20.164 load_reg( R_ECX, Rm );
20.165 - ADD_r32_r32( R_EAX, R_ECX );
20.166 - MEM_READ_BYTE( R_ECX, R_EAX );
20.167 + ADD_r32_r32( R_ECX, R_EAX );
20.168 +	        MMU_TRANSLATE_READ( R_EAX );
20.169 + MEM_READ_BYTE( R_EAX, R_EAX );
20.170 store_reg( R_EAX, Rn );
20.171 sh4_x86.tstate = TSTATE_NONE;
20.172 }
20.173 @@ -766,9 +778,10 @@
20.174 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.175 load_reg( R_EAX, 0 );
20.176 load_reg( R_ECX, Rm );
20.177 - ADD_r32_r32( R_EAX, R_ECX );
20.178 - check_ralign16( R_ECX );
20.179 - MEM_READ_WORD( R_ECX, R_EAX );
20.180 + ADD_r32_r32( R_ECX, R_EAX );
20.181 + check_ralign16( R_EAX );
20.182 + MMU_TRANSLATE_READ( R_EAX );
20.183 + MEM_READ_WORD( R_EAX, R_EAX );
20.184 store_reg( R_EAX, Rn );
20.185 sh4_x86.tstate = TSTATE_NONE;
20.186 }
20.187 @@ -778,9 +791,10 @@
20.188 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.189 load_reg( R_EAX, 0 );
20.190 load_reg( R_ECX, Rm );
20.191 - ADD_r32_r32( R_EAX, R_ECX );
20.192 - check_ralign32( R_ECX );
20.193 - MEM_READ_LONG( R_ECX, R_EAX );
20.194 + ADD_r32_r32( R_ECX, R_EAX );
20.195 + check_ralign32( R_EAX );
20.196 + MMU_TRANSLATE_READ( R_EAX );
20.197 + MEM_READ_LONG( R_EAX, R_EAX );
20.198 store_reg( R_EAX, Rn );
20.199 sh4_x86.tstate = TSTATE_NONE;
20.200 }
20.201 @@ -788,17 +802,34 @@
20.202 case 0xF:
20.203 { /* MAC.L @Rm+, @Rn+ */
20.204 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.205 - load_reg( R_ECX, Rm );
20.206 - check_ralign32( R_ECX );
20.207 - load_reg( R_ECX, Rn );
20.208 - check_ralign32( R_ECX );
20.209 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
20.210 - MEM_READ_LONG( R_ECX, R_EAX );
20.211 - PUSH_realigned_r32( R_EAX );
20.212 - load_reg( R_ECX, Rm );
20.213 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.214 + if( Rm == Rn ) {
20.215 + load_reg( R_EAX, Rm );
20.216 + check_ralign32( R_EAX );
20.217 + MMU_TRANSLATE_READ( R_EAX );
20.218 + PUSH_realigned_r32( R_EAX );
20.219 + load_reg( R_EAX, Rn );
20.220 + ADD_imm8s_r32( 4, R_EAX );
20.221 + MMU_TRANSLATE_READ( R_EAX );
20.222 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
20.223 +	        // Note: translate both addresses, in case they straddle a page boundary. It may be
20.224 +	        // worth adding a page-boundary check to skip the second translation.
20.225 + } else {
20.226 + load_reg( R_EAX, Rm );
20.227 + check_ralign32( R_EAX );
20.228 + MMU_TRANSLATE_READ( R_EAX );
20.229 + PUSH_realigned_r32( R_EAX );
20.230 + load_reg( R_EAX, Rn );
20.231 + check_ralign32( R_EAX );
20.232 + MMU_TRANSLATE_READ( R_EAX );
20.233 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
20.234 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.235 + }
20.236 + MEM_READ_LONG( R_EAX, R_EAX );
20.237 + POP_r32( R_ECX );
20.238 + PUSH_r32( R_EAX );
20.239 MEM_READ_LONG( R_ECX, R_EAX );
20.240 POP_realigned_r32( R_ECX );
20.241 +
20.242 IMUL_r32( R_ECX );
20.243 ADD_r32_sh4r( R_EAX, R_MACL );
20.244 ADC_r32_sh4r( R_EDX, R_MACH );
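When Rm == Rn, both MAC.L operands come from consecutive longwords through the same register, which is advanced by 8 in a single step; as the comment notes, each address is still translated separately because the two longwords may sit on different pages. Approximate semantics in C (the sh4r.mac field name is an assumption):

    /* Sketch of MAC.L @Rm+, @Rn+ for the Rm == Rn case. */
    uint32_t addr1 = sh4r.r[Rn];
    uint32_t addr2 = sh4r.r[Rn] + 4;
    if( sh4_x86.tlb_on ) {
        addr1 = mmu_vma_to_phys_read( addr1 );
        addr2 = mmu_vma_to_phys_read( addr2 );    /* second lookup covers a page crossing */
    }
    sh4r.r[Rn] += 8;
    sh4r.mac += (int64_t)(int32_t)sh4_read_long( addr1 ) *
                (int64_t)(int32_t)sh4_read_long( addr2 );  /* 64-bit MACH:MACL accumulate */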
20.245 @@ -819,11 +850,12 @@
20.246 case 0x1:
20.247 { /* MOV.L Rm, @(disp, Rn) */
20.248 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
20.249 - load_reg( R_ECX, Rn );
20.250 - load_reg( R_EAX, Rm );
20.251 - ADD_imm32_r32( disp, R_ECX );
20.252 - check_walign32( R_ECX );
20.253 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.254 + load_reg( R_EAX, Rn );
20.255 + ADD_imm32_r32( disp, R_EAX );
20.256 + check_walign32( R_EAX );
20.257 + MMU_TRANSLATE_WRITE( R_EAX );
20.258 + load_reg( R_EDX, Rm );
20.259 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.260 sh4_x86.tstate = TSTATE_NONE;
20.261 }
20.262 break;
20.263 @@ -832,64 +864,70 @@
20.264 case 0x0:
20.265 { /* MOV.B Rm, @Rn */
20.266 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.267 - load_reg( R_EAX, Rm );
20.268 - load_reg( R_ECX, Rn );
20.269 - MEM_WRITE_BYTE( R_ECX, R_EAX );
20.270 + load_reg( R_EAX, Rn );
20.271 + MMU_TRANSLATE_WRITE( R_EAX );
20.272 + load_reg( R_EDX, Rm );
20.273 + MEM_WRITE_BYTE( R_EAX, R_EDX );
20.274 sh4_x86.tstate = TSTATE_NONE;
20.275 }
20.276 break;
20.277 case 0x1:
20.278 { /* MOV.W Rm, @Rn */
20.279 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.280 - load_reg( R_ECX, Rn );
20.281 - check_walign16( R_ECX );
20.282 - load_reg( R_EAX, Rm );
20.283 - MEM_WRITE_WORD( R_ECX, R_EAX );
20.284 + load_reg( R_EAX, Rn );
20.285 + check_walign16( R_EAX );
20.286 +	        MMU_TRANSLATE_WRITE( R_EAX );
20.287 + load_reg( R_EDX, Rm );
20.288 + MEM_WRITE_WORD( R_EAX, R_EDX );
20.289 sh4_x86.tstate = TSTATE_NONE;
20.290 }
20.291 break;
20.292 case 0x2:
20.293 { /* MOV.L Rm, @Rn */
20.294 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.295 - load_reg( R_EAX, Rm );
20.296 - load_reg( R_ECX, Rn );
20.297 - check_walign32(R_ECX);
20.298 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.299 + load_reg( R_EAX, Rn );
20.300 + check_walign32(R_EAX);
20.301 + MMU_TRANSLATE_WRITE( R_EAX );
20.302 + load_reg( R_EDX, Rm );
20.303 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.304 sh4_x86.tstate = TSTATE_NONE;
20.305 }
20.306 break;
20.307 case 0x4:
20.308 { /* MOV.B Rm, @-Rn */
20.309 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.310 - load_reg( R_EAX, Rm );
20.311 - load_reg( R_ECX, Rn );
20.312 - ADD_imm8s_r32( -1, R_ECX );
20.313 - store_reg( R_ECX, Rn );
20.314 - MEM_WRITE_BYTE( R_ECX, R_EAX );
20.315 + load_reg( R_EAX, Rn );
20.316 + ADD_imm8s_r32( -1, R_EAX );
20.317 + MMU_TRANSLATE_WRITE( R_EAX );
20.318 + load_reg( R_EDX, Rm );
20.319 + ADD_imm8s_sh4r( -1, REG_OFFSET(r[Rn]) );
20.320 + MEM_WRITE_BYTE( R_EAX, R_EDX );
20.321 sh4_x86.tstate = TSTATE_NONE;
20.322 }
20.323 break;
20.324 case 0x5:
20.325 { /* MOV.W Rm, @-Rn */
20.326 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.327 - load_reg( R_ECX, Rn );
20.328 - check_walign16( R_ECX );
20.329 - load_reg( R_EAX, Rm );
20.330 - ADD_imm8s_r32( -2, R_ECX );
20.331 - store_reg( R_ECX, Rn );
20.332 - MEM_WRITE_WORD( R_ECX, R_EAX );
20.333 + load_reg( R_EAX, Rn );
20.334 + ADD_imm8s_r32( -2, R_EAX );
20.335 + check_walign16( R_EAX );
20.336 + MMU_TRANSLATE_WRITE( R_EAX );
20.337 + load_reg( R_EDX, Rm );
20.338 + ADD_imm8s_sh4r( -2, REG_OFFSET(r[Rn]) );
20.339 + MEM_WRITE_WORD( R_EAX, R_EDX );
20.340 sh4_x86.tstate = TSTATE_NONE;
20.341 }
20.342 break;
20.343 case 0x6:
20.344 { /* MOV.L Rm, @-Rn */
20.345 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.346 - load_reg( R_EAX, Rm );
20.347 - load_reg( R_ECX, Rn );
20.348 - check_walign32( R_ECX );
20.349 - ADD_imm8s_r32( -4, R_ECX );
20.350 - store_reg( R_ECX, Rn );
20.351 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.352 + load_reg( R_EAX, Rn );
20.353 + ADD_imm8s_r32( -4, R_EAX );
20.354 + check_walign32( R_EAX );
20.355 + MMU_TRANSLATE_WRITE( R_EAX );
20.356 + load_reg( R_EDX, Rm );
20.357 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.358 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.359 sh4_x86.tstate = TSTATE_NONE;
20.360 }
20.361 break;
20.362 @@ -1262,36 +1300,39 @@
20.363 case 0x0:
20.364 { /* STS.L MACH, @-Rn */
20.365 uint32_t Rn = ((ir>>8)&0xF);
20.366 - load_reg( R_ECX, Rn );
20.367 - check_walign32( R_ECX );
20.368 - ADD_imm8s_r32( -4, R_ECX );
20.369 - store_reg( R_ECX, Rn );
20.370 - load_spreg( R_EAX, R_MACH );
20.371 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.372 + load_reg( R_EAX, Rn );
20.373 + check_walign32( R_EAX );
20.374 + ADD_imm8s_r32( -4, R_EAX );
20.375 + MMU_TRANSLATE_WRITE( R_EAX );
20.376 + load_spreg( R_EDX, R_MACH );
20.377 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.378 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.379 sh4_x86.tstate = TSTATE_NONE;
20.380 }
20.381 break;
20.382 case 0x1:
20.383 { /* STS.L MACL, @-Rn */
20.384 uint32_t Rn = ((ir>>8)&0xF);
20.385 - load_reg( R_ECX, Rn );
20.386 - check_walign32( R_ECX );
20.387 - ADD_imm8s_r32( -4, R_ECX );
20.388 - store_reg( R_ECX, Rn );
20.389 - load_spreg( R_EAX, R_MACL );
20.390 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.391 + load_reg( R_EAX, Rn );
20.392 + check_walign32( R_EAX );
20.393 + ADD_imm8s_r32( -4, R_EAX );
20.394 + MMU_TRANSLATE_WRITE( R_EAX );
20.395 + load_spreg( R_EDX, R_MACL );
20.396 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.397 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.398 sh4_x86.tstate = TSTATE_NONE;
20.399 }
20.400 break;
20.401 case 0x2:
20.402 { /* STS.L PR, @-Rn */
20.403 uint32_t Rn = ((ir>>8)&0xF);
20.404 - load_reg( R_ECX, Rn );
20.405 - check_walign32( R_ECX );
20.406 - ADD_imm8s_r32( -4, R_ECX );
20.407 - store_reg( R_ECX, Rn );
20.408 - load_spreg( R_EAX, R_PR );
20.409 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.410 + load_reg( R_EAX, Rn );
20.411 + check_walign32( R_EAX );
20.412 + ADD_imm8s_r32( -4, R_EAX );
20.413 + MMU_TRANSLATE_WRITE( R_EAX );
20.414 + load_spreg( R_EDX, R_PR );
20.415 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.416 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.417 sh4_x86.tstate = TSTATE_NONE;
20.418 }
20.419 break;
20.420 @@ -1299,36 +1340,39 @@
20.421 { /* STC.L SGR, @-Rn */
20.422 uint32_t Rn = ((ir>>8)&0xF);
20.423 check_priv();
20.424 - load_reg( R_ECX, Rn );
20.425 - check_walign32( R_ECX );
20.426 - ADD_imm8s_r32( -4, R_ECX );
20.427 - store_reg( R_ECX, Rn );
20.428 - load_spreg( R_EAX, R_SGR );
20.429 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.430 + load_reg( R_EAX, Rn );
20.431 + check_walign32( R_EAX );
20.432 + ADD_imm8s_r32( -4, R_EAX );
20.433 + MMU_TRANSLATE_WRITE( R_EAX );
20.434 + load_spreg( R_EDX, R_SGR );
20.435 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.436 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.437 sh4_x86.tstate = TSTATE_NONE;
20.438 }
20.439 break;
20.440 case 0x5:
20.441 { /* STS.L FPUL, @-Rn */
20.442 uint32_t Rn = ((ir>>8)&0xF);
20.443 - load_reg( R_ECX, Rn );
20.444 - check_walign32( R_ECX );
20.445 - ADD_imm8s_r32( -4, R_ECX );
20.446 - store_reg( R_ECX, Rn );
20.447 - load_spreg( R_EAX, R_FPUL );
20.448 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.449 + load_reg( R_EAX, Rn );
20.450 + check_walign32( R_EAX );
20.451 + ADD_imm8s_r32( -4, R_EAX );
20.452 + MMU_TRANSLATE_WRITE( R_EAX );
20.453 + load_spreg( R_EDX, R_FPUL );
20.454 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.455 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.456 sh4_x86.tstate = TSTATE_NONE;
20.457 }
20.458 break;
20.459 case 0x6:
20.460 { /* STS.L FPSCR, @-Rn */
20.461 uint32_t Rn = ((ir>>8)&0xF);
20.462 - load_reg( R_ECX, Rn );
20.463 - check_walign32( R_ECX );
20.464 - ADD_imm8s_r32( -4, R_ECX );
20.465 - store_reg( R_ECX, Rn );
20.466 - load_spreg( R_EAX, R_FPSCR );
20.467 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.468 + load_reg( R_EAX, Rn );
20.469 + check_walign32( R_EAX );
20.470 + ADD_imm8s_r32( -4, R_EAX );
20.471 + MMU_TRANSLATE_WRITE( R_EAX );
20.472 + load_spreg( R_EDX, R_FPSCR );
20.473 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.474 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.475 sh4_x86.tstate = TSTATE_NONE;
20.476 }
20.477 break;
20.478 @@ -1336,12 +1380,13 @@
20.479 { /* STC.L DBR, @-Rn */
20.480 uint32_t Rn = ((ir>>8)&0xF);
20.481 check_priv();
20.482 - load_reg( R_ECX, Rn );
20.483 - check_walign32( R_ECX );
20.484 - ADD_imm8s_r32( -4, R_ECX );
20.485 - store_reg( R_ECX, Rn );
20.486 - load_spreg( R_EAX, R_DBR );
20.487 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.488 + load_reg( R_EAX, Rn );
20.489 + check_walign32( R_EAX );
20.490 + ADD_imm8s_r32( -4, R_EAX );
20.491 + MMU_TRANSLATE_WRITE( R_EAX );
20.492 + load_spreg( R_EDX, R_DBR );
20.493 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.494 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.495 sh4_x86.tstate = TSTATE_NONE;
20.496 }
20.497 break;
20.498 @@ -1358,11 +1403,14 @@
20.499 { /* STC.L SR, @-Rn */
20.500 uint32_t Rn = ((ir>>8)&0xF);
20.501 check_priv();
20.502 + load_reg( R_EAX, Rn );
20.503 + check_walign32( R_EAX );
20.504 + ADD_imm8s_r32( -4, R_EAX );
20.505 + MMU_TRANSLATE_WRITE( R_EAX );
20.506 + PUSH_realigned_r32( R_EAX );
20.507 call_func0( sh4_read_sr );
20.508 - load_reg( R_ECX, Rn );
20.509 - check_walign32( R_ECX );
20.510 - ADD_imm8s_r32( -4, R_ECX );
20.511 - store_reg( R_ECX, Rn );
20.512 + POP_realigned_r32( R_ECX );
20.513 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.514 MEM_WRITE_LONG( R_ECX, R_EAX );
20.515 sh4_x86.tstate = TSTATE_NONE;
20.516 }
20.517 @@ -1370,12 +1418,13 @@
20.518 case 0x1:
20.519 { /* STC.L GBR, @-Rn */
20.520 uint32_t Rn = ((ir>>8)&0xF);
20.521 - load_reg( R_ECX, Rn );
20.522 - check_walign32( R_ECX );
20.523 - ADD_imm8s_r32( -4, R_ECX );
20.524 - store_reg( R_ECX, Rn );
20.525 - load_spreg( R_EAX, R_GBR );
20.526 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.527 + load_reg( R_EAX, Rn );
20.528 + check_walign32( R_EAX );
20.529 + ADD_imm8s_r32( -4, R_EAX );
20.530 + MMU_TRANSLATE_WRITE( R_EAX );
20.531 + load_spreg( R_EDX, R_GBR );
20.532 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.533 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.534 sh4_x86.tstate = TSTATE_NONE;
20.535 }
20.536 break;
20.537 @@ -1383,12 +1432,13 @@
20.538 { /* STC.L VBR, @-Rn */
20.539 uint32_t Rn = ((ir>>8)&0xF);
20.540 check_priv();
20.541 - load_reg( R_ECX, Rn );
20.542 - check_walign32( R_ECX );
20.543 - ADD_imm8s_r32( -4, R_ECX );
20.544 - store_reg( R_ECX, Rn );
20.545 - load_spreg( R_EAX, R_VBR );
20.546 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.547 + load_reg( R_EAX, Rn );
20.548 + check_walign32( R_EAX );
20.549 + ADD_imm8s_r32( -4, R_EAX );
20.550 + MMU_TRANSLATE_WRITE( R_EAX );
20.551 + load_spreg( R_EDX, R_VBR );
20.552 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.553 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.554 sh4_x86.tstate = TSTATE_NONE;
20.555 }
20.556 break;
20.557 @@ -1396,12 +1446,13 @@
20.558 { /* STC.L SSR, @-Rn */
20.559 uint32_t Rn = ((ir>>8)&0xF);
20.560 check_priv();
20.561 - load_reg( R_ECX, Rn );
20.562 - check_walign32( R_ECX );
20.563 - ADD_imm8s_r32( -4, R_ECX );
20.564 - store_reg( R_ECX, Rn );
20.565 - load_spreg( R_EAX, R_SSR );
20.566 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.567 + load_reg( R_EAX, Rn );
20.568 + check_walign32( R_EAX );
20.569 + ADD_imm8s_r32( -4, R_EAX );
20.570 + MMU_TRANSLATE_WRITE( R_EAX );
20.571 + load_spreg( R_EDX, R_SSR );
20.572 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.573 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.574 sh4_x86.tstate = TSTATE_NONE;
20.575 }
20.576 break;
20.577 @@ -1409,12 +1460,13 @@
20.578 { /* STC.L SPC, @-Rn */
20.579 uint32_t Rn = ((ir>>8)&0xF);
20.580 check_priv();
20.581 - load_reg( R_ECX, Rn );
20.582 - check_walign32( R_ECX );
20.583 - ADD_imm8s_r32( -4, R_ECX );
20.584 - store_reg( R_ECX, Rn );
20.585 - load_spreg( R_EAX, R_SPC );
20.586 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.587 + load_reg( R_EAX, Rn );
20.588 + check_walign32( R_EAX );
20.589 + ADD_imm8s_r32( -4, R_EAX );
20.590 + MMU_TRANSLATE_WRITE( R_EAX );
20.591 + load_spreg( R_EDX, R_SPC );
20.592 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.593 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.594 sh4_x86.tstate = TSTATE_NONE;
20.595 }
20.596 break;
20.597 @@ -1427,12 +1479,13 @@
20.598 { /* STC.L Rm_BANK, @-Rn */
20.599 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
20.600 check_priv();
20.601 - load_reg( R_ECX, Rn );
20.602 - check_walign32( R_ECX );
20.603 - ADD_imm8s_r32( -4, R_ECX );
20.604 - store_reg( R_ECX, Rn );
20.605 - load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
20.606 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.607 + load_reg( R_EAX, Rn );
20.608 + check_walign32( R_EAX );
20.609 + ADD_imm8s_r32( -4, R_EAX );
20.610 + MMU_TRANSLATE_WRITE( R_EAX );
20.611 + load_spreg( R_EDX, REG_OFFSET(r_bank[Rm_BANK]) );
20.612 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
20.613 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.614 sh4_x86.tstate = TSTATE_NONE;
20.615 }
20.616 break;
20.617 @@ -1514,10 +1567,9 @@
20.618 uint32_t Rm = ((ir>>8)&0xF);
20.619 load_reg( R_EAX, Rm );
20.620 check_ralign32( R_EAX );
20.621 - MOV_r32_r32( R_EAX, R_ECX );
20.622 - ADD_imm8s_r32( 4, R_EAX );
20.623 - store_reg( R_EAX, Rm );
20.624 - MEM_READ_LONG( R_ECX, R_EAX );
20.625 + MMU_TRANSLATE_READ( R_EAX );
20.626 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.627 + MEM_READ_LONG( R_EAX, R_EAX );
20.628 store_spreg( R_EAX, R_MACH );
20.629 sh4_x86.tstate = TSTATE_NONE;
20.630 }
20.631 @@ -1527,10 +1579,9 @@
20.632 uint32_t Rm = ((ir>>8)&0xF);
20.633 load_reg( R_EAX, Rm );
20.634 check_ralign32( R_EAX );
20.635 - MOV_r32_r32( R_EAX, R_ECX );
20.636 - ADD_imm8s_r32( 4, R_EAX );
20.637 - store_reg( R_EAX, Rm );
20.638 - MEM_READ_LONG( R_ECX, R_EAX );
20.639 + MMU_TRANSLATE_READ( R_EAX );
20.640 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.641 + MEM_READ_LONG( R_EAX, R_EAX );
20.642 store_spreg( R_EAX, R_MACL );
20.643 sh4_x86.tstate = TSTATE_NONE;
20.644 }
20.645 @@ -1540,10 +1591,9 @@
20.646 uint32_t Rm = ((ir>>8)&0xF);
20.647 load_reg( R_EAX, Rm );
20.648 check_ralign32( R_EAX );
20.649 - MOV_r32_r32( R_EAX, R_ECX );
20.650 - ADD_imm8s_r32( 4, R_EAX );
20.651 - store_reg( R_EAX, Rm );
20.652 - MEM_READ_LONG( R_ECX, R_EAX );
20.653 + MMU_TRANSLATE_READ( R_EAX );
20.654 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.655 + MEM_READ_LONG( R_EAX, R_EAX );
20.656 store_spreg( R_EAX, R_PR );
20.657 sh4_x86.tstate = TSTATE_NONE;
20.658 }
20.659 @@ -1554,10 +1604,9 @@
20.660 check_priv();
20.661 load_reg( R_EAX, Rm );
20.662 check_ralign32( R_EAX );
20.663 - MOV_r32_r32( R_EAX, R_ECX );
20.664 - ADD_imm8s_r32( 4, R_EAX );
20.665 - store_reg( R_EAX, Rm );
20.666 - MEM_READ_LONG( R_ECX, R_EAX );
20.667 + MMU_TRANSLATE_READ( R_EAX );
20.668 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.669 + MEM_READ_LONG( R_EAX, R_EAX );
20.670 store_spreg( R_EAX, R_SGR );
20.671 sh4_x86.tstate = TSTATE_NONE;
20.672 }
20.673 @@ -1567,10 +1616,9 @@
20.674 uint32_t Rm = ((ir>>8)&0xF);
20.675 load_reg( R_EAX, Rm );
20.676 check_ralign32( R_EAX );
20.677 - MOV_r32_r32( R_EAX, R_ECX );
20.678 - ADD_imm8s_r32( 4, R_EAX );
20.679 - store_reg( R_EAX, Rm );
20.680 - MEM_READ_LONG( R_ECX, R_EAX );
20.681 + MMU_TRANSLATE_READ( R_EAX );
20.682 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.683 + MEM_READ_LONG( R_EAX, R_EAX );
20.684 store_spreg( R_EAX, R_FPUL );
20.685 sh4_x86.tstate = TSTATE_NONE;
20.686 }
20.687 @@ -1580,10 +1628,9 @@
20.688 uint32_t Rm = ((ir>>8)&0xF);
20.689 load_reg( R_EAX, Rm );
20.690 check_ralign32( R_EAX );
20.691 - MOV_r32_r32( R_EAX, R_ECX );
20.692 - ADD_imm8s_r32( 4, R_EAX );
20.693 - store_reg( R_EAX, Rm );
20.694 - MEM_READ_LONG( R_ECX, R_EAX );
20.695 + MMU_TRANSLATE_READ( R_EAX );
20.696 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.697 + MEM_READ_LONG( R_EAX, R_EAX );
20.698 store_spreg( R_EAX, R_FPSCR );
20.699 update_fr_bank( R_EAX );
20.700 sh4_x86.tstate = TSTATE_NONE;
20.701 @@ -1595,10 +1642,9 @@
20.702 check_priv();
20.703 load_reg( R_EAX, Rm );
20.704 check_ralign32( R_EAX );
20.705 - MOV_r32_r32( R_EAX, R_ECX );
20.706 - ADD_imm8s_r32( 4, R_EAX );
20.707 - store_reg( R_EAX, Rm );
20.708 - MEM_READ_LONG( R_ECX, R_EAX );
20.709 + MMU_TRANSLATE_READ( R_EAX );
20.710 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.711 + MEM_READ_LONG( R_EAX, R_EAX );
20.712 store_spreg( R_EAX, R_DBR );
20.713 sh4_x86.tstate = TSTATE_NONE;
20.714 }
20.715 @@ -1621,10 +1667,9 @@
20.716 check_priv();
20.717 load_reg( R_EAX, Rm );
20.718 check_ralign32( R_EAX );
20.719 - MOV_r32_r32( R_EAX, R_ECX );
20.720 - ADD_imm8s_r32( 4, R_EAX );
20.721 - store_reg( R_EAX, Rm );
20.722 - MEM_READ_LONG( R_ECX, R_EAX );
20.723 + MMU_TRANSLATE_READ( R_EAX );
20.724 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.725 + MEM_READ_LONG( R_EAX, R_EAX );
20.726 call_func1( sh4_write_sr, R_EAX );
20.727 sh4_x86.priv_checked = FALSE;
20.728 sh4_x86.fpuen_checked = FALSE;
20.729 @@ -1637,10 +1682,9 @@
20.730 uint32_t Rm = ((ir>>8)&0xF);
20.731 load_reg( R_EAX, Rm );
20.732 check_ralign32( R_EAX );
20.733 - MOV_r32_r32( R_EAX, R_ECX );
20.734 - ADD_imm8s_r32( 4, R_EAX );
20.735 - store_reg( R_EAX, Rm );
20.736 - MEM_READ_LONG( R_ECX, R_EAX );
20.737 + MMU_TRANSLATE_READ( R_EAX );
20.738 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.739 + MEM_READ_LONG( R_EAX, R_EAX );
20.740 store_spreg( R_EAX, R_GBR );
20.741 sh4_x86.tstate = TSTATE_NONE;
20.742 }
20.743 @@ -1651,10 +1695,9 @@
20.744 check_priv();
20.745 load_reg( R_EAX, Rm );
20.746 check_ralign32( R_EAX );
20.747 - MOV_r32_r32( R_EAX, R_ECX );
20.748 - ADD_imm8s_r32( 4, R_EAX );
20.749 - store_reg( R_EAX, Rm );
20.750 - MEM_READ_LONG( R_ECX, R_EAX );
20.751 + MMU_TRANSLATE_READ( R_EAX );
20.752 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.753 + MEM_READ_LONG( R_EAX, R_EAX );
20.754 store_spreg( R_EAX, R_VBR );
20.755 sh4_x86.tstate = TSTATE_NONE;
20.756 }
20.757 @@ -1665,10 +1708,9 @@
20.758 check_priv();
20.759 load_reg( R_EAX, Rm );
20.760 check_ralign32( R_EAX );
20.761 - MOV_r32_r32( R_EAX, R_ECX );
20.762 - ADD_imm8s_r32( 4, R_EAX );
20.763 - store_reg( R_EAX, Rm );
20.764 - MEM_READ_LONG( R_ECX, R_EAX );
20.765 + MMU_TRANSLATE_READ( R_EAX );
20.766 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.767 + MEM_READ_LONG( R_EAX, R_EAX );
20.768 store_spreg( R_EAX, R_SSR );
20.769 sh4_x86.tstate = TSTATE_NONE;
20.770 }
20.771 @@ -1679,10 +1721,9 @@
20.772 check_priv();
20.773 load_reg( R_EAX, Rm );
20.774 check_ralign32( R_EAX );
20.775 - MOV_r32_r32( R_EAX, R_ECX );
20.776 - ADD_imm8s_r32( 4, R_EAX );
20.777 - store_reg( R_EAX, Rm );
20.778 - MEM_READ_LONG( R_ECX, R_EAX );
20.779 + MMU_TRANSLATE_READ( R_EAX );
20.780 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.781 + MEM_READ_LONG( R_EAX, R_EAX );
20.782 store_spreg( R_EAX, R_SPC );
20.783 sh4_x86.tstate = TSTATE_NONE;
20.784 }
20.785 @@ -1698,10 +1739,9 @@
20.786 check_priv();
20.787 load_reg( R_EAX, Rm );
20.788 check_ralign32( R_EAX );
20.789 - MOV_r32_r32( R_EAX, R_ECX );
20.790 - ADD_imm8s_r32( 4, R_EAX );
20.791 - store_reg( R_EAX, Rm );
20.792 - MEM_READ_LONG( R_ECX, R_EAX );
20.793 + MMU_TRANSLATE_READ( R_EAX );
20.794 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.795 + MEM_READ_LONG( R_EAX, R_EAX );
20.796 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
20.797 sh4_x86.tstate = TSTATE_NONE;
20.798 }
20.799 @@ -1861,12 +1901,14 @@
20.800 case 0x1:
20.801 { /* TAS.B @Rn */
20.802 uint32_t Rn = ((ir>>8)&0xF);
20.803 - load_reg( R_ECX, Rn );
20.804 - MEM_READ_BYTE( R_ECX, R_EAX );
20.805 + load_reg( R_EAX, Rn );
20.806 + MMU_TRANSLATE_WRITE( R_EAX );
20.807 + PUSH_realigned_r32( R_EAX );
20.808 + MEM_READ_BYTE( R_EAX, R_EAX );
20.809 TEST_r8_r8( R_AL, R_AL );
20.810 SETE_t();
20.811 OR_imm8_r8( 0x80, R_AL );
20.812 - load_reg( R_ECX, Rn );
20.813 + POP_realigned_r32( R_ECX );
20.814 MEM_WRITE_BYTE( R_ECX, R_EAX );
20.815 sh4_x86.tstate = TSTATE_NONE;
20.816 }
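TAS.B is a read-modify-write, so the new sequence translates once with write intent, keeps the physical address on the stack across the read call, and reuses it for the write-back. Approximately:

    /* Sketch of the emitted TAS.B @Rn sequence. */
    uint32_t addr = sh4r.r[Rn];
    if( sh4_x86.tlb_on )
        addr = mmu_vma_to_phys_write( addr );     /* write intent up front */
    uint8_t tmp = sh4_read_byte( addr );          /* addr preserved across the call */
    sh4r.t = (tmp == 0);                          /* SETE_t: T set if the byte was zero */
    sh4_write_byte( addr, tmp | 0x80 );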
20.817 @@ -2019,15 +2061,31 @@
20.818 case 0xF:
20.819 { /* MAC.W @Rm+, @Rn+ */
20.820 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.821 - load_reg( R_ECX, Rm );
20.822 - check_ralign16( R_ECX );
20.823 - load_reg( R_ECX, Rn );
20.824 - check_ralign16( R_ECX );
20.825 - ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
20.826 - MEM_READ_WORD( R_ECX, R_EAX );
20.827 - PUSH_realigned_r32( R_EAX );
20.828 - load_reg( R_ECX, Rm );
20.829 - ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
20.830 + if( Rm == Rn ) {
20.831 + load_reg( R_EAX, Rm );
20.832 + check_ralign16( R_EAX );
20.833 + MMU_TRANSLATE_READ( R_EAX );
20.834 + PUSH_realigned_r32( R_EAX );
20.835 + load_reg( R_EAX, Rn );
20.836 + ADD_imm8s_r32( 2, R_EAX );
20.837 + MMU_TRANSLATE_READ( R_EAX );
20.838 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
20.839 +	        // Note: translate both addresses, in case they straddle a page boundary. It may be
20.840 +	        // worth adding a page-boundary check to skip the second translation.
20.841 + } else {
20.842 + load_reg( R_EAX, Rm );
20.843 + check_ralign16( R_EAX );
20.844 + MMU_TRANSLATE_READ( R_EAX );
20.845 + PUSH_realigned_r32( R_EAX );
20.846 + load_reg( R_EAX, Rn );
20.847 + check_ralign16( R_EAX );
20.848 + MMU_TRANSLATE_READ( R_EAX );
20.849 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
20.850 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
20.851 + }
20.852 + MEM_READ_WORD( R_EAX, R_EAX );
20.853 + POP_r32( R_ECX );
20.854 + PUSH_r32( R_EAX );
20.855 MEM_READ_WORD( R_ECX, R_EAX );
20.856 POP_realigned_r32( R_ECX );
20.857 IMUL_r32( R_ECX );
20.858 @@ -2064,10 +2122,11 @@
20.859 case 0x5:
20.860 { /* MOV.L @(disp, Rm), Rn */
20.861 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
20.862 - load_reg( R_ECX, Rm );
20.863 - ADD_imm8s_r32( disp, R_ECX );
20.864 - check_ralign32( R_ECX );
20.865 - MEM_READ_LONG( R_ECX, R_EAX );
20.866 + load_reg( R_EAX, Rm );
20.867 + ADD_imm8s_r32( disp, R_EAX );
20.868 + check_ralign32( R_EAX );
20.869 + MMU_TRANSLATE_READ( R_EAX );
20.870 + MEM_READ_LONG( R_EAX, R_EAX );
20.871 store_reg( R_EAX, Rn );
20.872 sh4_x86.tstate = TSTATE_NONE;
20.873 }
20.874 @@ -2077,8 +2136,9 @@
20.875 case 0x0:
20.876 { /* MOV.B @Rm, Rn */
20.877 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.878 - load_reg( R_ECX, Rm );
20.879 - MEM_READ_BYTE( R_ECX, R_EAX );
20.880 + load_reg( R_EAX, Rm );
20.881 + MMU_TRANSLATE_READ( R_EAX );
20.882 + MEM_READ_BYTE( R_EAX, R_EAX );
20.883 store_reg( R_EAX, Rn );
20.884 sh4_x86.tstate = TSTATE_NONE;
20.885 }
20.886 @@ -2086,9 +2146,10 @@
20.887 case 0x1:
20.888 { /* MOV.W @Rm, Rn */
20.889 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.890 - load_reg( R_ECX, Rm );
20.891 - check_ralign16( R_ECX );
20.892 - MEM_READ_WORD( R_ECX, R_EAX );
20.893 + load_reg( R_EAX, Rm );
20.894 + check_ralign16( R_EAX );
20.895 + MMU_TRANSLATE_READ( R_EAX );
20.896 + MEM_READ_WORD( R_EAX, R_EAX );
20.897 store_reg( R_EAX, Rn );
20.898 sh4_x86.tstate = TSTATE_NONE;
20.899 }
20.900 @@ -2096,9 +2157,10 @@
20.901 case 0x2:
20.902 { /* MOV.L @Rm, Rn */
20.903 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.904 - load_reg( R_ECX, Rm );
20.905 - check_ralign32( R_ECX );
20.906 - MEM_READ_LONG( R_ECX, R_EAX );
20.907 + load_reg( R_EAX, Rm );
20.908 + check_ralign32( R_EAX );
20.909 + MMU_TRANSLATE_READ( R_EAX );
20.910 + MEM_READ_LONG( R_EAX, R_EAX );
20.911 store_reg( R_EAX, Rn );
20.912 sh4_x86.tstate = TSTATE_NONE;
20.913 }
20.914 @@ -2113,11 +2175,10 @@
20.915 case 0x4:
20.916 { /* MOV.B @Rm+, Rn */
20.917 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.918 - load_reg( R_ECX, Rm );
20.919 - MOV_r32_r32( R_ECX, R_EAX );
20.920 - ADD_imm8s_r32( 1, R_EAX );
20.921 - store_reg( R_EAX, Rm );
20.922 - MEM_READ_BYTE( R_ECX, R_EAX );
20.923 + load_reg( R_EAX, Rm );
20.924 + MMU_TRANSLATE_READ( R_EAX );
20.925 + ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
20.926 + MEM_READ_BYTE( R_EAX, R_EAX );
20.927 store_reg( R_EAX, Rn );
20.928 sh4_x86.tstate = TSTATE_NONE;
20.929 }
20.930 @@ -2127,10 +2188,9 @@
20.931 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.932 load_reg( R_EAX, Rm );
20.933 check_ralign16( R_EAX );
20.934 - MOV_r32_r32( R_EAX, R_ECX );
20.935 - ADD_imm8s_r32( 2, R_EAX );
20.936 - store_reg( R_EAX, Rm );
20.937 - MEM_READ_WORD( R_ECX, R_EAX );
20.938 + MMU_TRANSLATE_READ( R_EAX );
20.939 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
20.940 + MEM_READ_WORD( R_EAX, R_EAX );
20.941 store_reg( R_EAX, Rn );
20.942 sh4_x86.tstate = TSTATE_NONE;
20.943 }
20.944 @@ -2140,10 +2200,9 @@
20.945 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.946 load_reg( R_EAX, Rm );
20.947 check_ralign32( R_EAX );
20.948 - MOV_r32_r32( R_EAX, R_ECX );
20.949 - ADD_imm8s_r32( 4, R_EAX );
20.950 - store_reg( R_EAX, Rm );
20.951 - MEM_READ_LONG( R_ECX, R_EAX );
20.952 + MMU_TRANSLATE_READ( R_EAX );
20.953 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.954 + MEM_READ_LONG( R_EAX, R_EAX );
20.955 store_reg( R_EAX, Rn );
20.956 sh4_x86.tstate = TSTATE_NONE;
20.957 }
20.958 @@ -2246,30 +2305,33 @@
20.959 case 0x0:
20.960 { /* MOV.B R0, @(disp, Rn) */
20.961 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
20.962 - load_reg( R_EAX, 0 );
20.963 - load_reg( R_ECX, Rn );
20.964 - ADD_imm32_r32( disp, R_ECX );
20.965 - MEM_WRITE_BYTE( R_ECX, R_EAX );
20.966 + load_reg( R_EAX, Rn );
20.967 + ADD_imm32_r32( disp, R_EAX );
20.968 + MMU_TRANSLATE_WRITE( R_EAX );
20.969 + load_reg( R_EDX, 0 );
20.970 + MEM_WRITE_BYTE( R_EAX, R_EDX );
20.971 sh4_x86.tstate = TSTATE_NONE;
20.972 }
20.973 break;
20.974 case 0x1:
20.975 { /* MOV.W R0, @(disp, Rn) */
20.976 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
20.977 - load_reg( R_ECX, Rn );
20.978 - load_reg( R_EAX, 0 );
20.979 - ADD_imm32_r32( disp, R_ECX );
20.980 - check_walign16( R_ECX );
20.981 - MEM_WRITE_WORD( R_ECX, R_EAX );
20.982 + load_reg( R_EAX, Rn );
20.983 + ADD_imm32_r32( disp, R_EAX );
20.984 + check_walign16( R_EAX );
20.985 + MMU_TRANSLATE_WRITE( R_EAX );
20.986 + load_reg( R_EDX, 0 );
20.987 + MEM_WRITE_WORD( R_EAX, R_EDX );
20.988 sh4_x86.tstate = TSTATE_NONE;
20.989 }
20.990 break;
20.991 case 0x4:
20.992 { /* MOV.B @(disp, Rm), R0 */
20.993 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
20.994 - load_reg( R_ECX, Rm );
20.995 - ADD_imm32_r32( disp, R_ECX );
20.996 - MEM_READ_BYTE( R_ECX, R_EAX );
20.997 + load_reg( R_EAX, Rm );
20.998 + ADD_imm32_r32( disp, R_EAX );
20.999 + MMU_TRANSLATE_READ( R_EAX );
20.1000 + MEM_READ_BYTE( R_EAX, R_EAX );
20.1001 store_reg( R_EAX, 0 );
20.1002 sh4_x86.tstate = TSTATE_NONE;
20.1003 }
20.1004 @@ -2277,10 +2339,11 @@
20.1005 case 0x5:
20.1006 { /* MOV.W @(disp, Rm), R0 */
20.1007 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
20.1008 - load_reg( R_ECX, Rm );
20.1009 - ADD_imm32_r32( disp, R_ECX );
20.1010 - check_ralign16( R_ECX );
20.1011 - MEM_READ_WORD( R_ECX, R_EAX );
20.1012 + load_reg( R_EAX, Rm );
20.1013 + ADD_imm32_r32( disp, R_EAX );
20.1014 + check_ralign16( R_EAX );
20.1015 + MMU_TRANSLATE_READ( R_EAX );
20.1016 + MEM_READ_WORD( R_EAX, R_EAX );
20.1017 store_reg( R_EAX, 0 );
20.1018 sh4_x86.tstate = TSTATE_NONE;
20.1019 }
20.1020 @@ -2300,8 +2363,9 @@
20.1021 if( sh4_x86.in_delay_slot ) {
20.1022 SLOTILLEGAL();
20.1023 } else {
20.1024 - JF_rel8( EXIT_BLOCK_SIZE, nottaken );
20.1025 - exit_block( disp + pc + 4, pc+2 );
20.1026 + sh4vma_t target = disp + pc + 4;
20.1027 + JF_rel8( EXIT_BLOCK_REL_SIZE(target), nottaken );
20.1028 + exit_block_rel(target, pc+2 );
20.1029 JMP_TARGET(nottaken);
20.1030 return 2;
20.1031 }
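The switch from exit_block() to exit_block_rel() here and below plausibly follows from the note recorded at the PC-relative loads further down: with the MMU on, a block may execute at a different virtual address than it was translated at, so exits encode the target relative to the runtime PC rather than as an absolute address. Since the stub length can then vary with the target, the skip-over branch uses EXIT_BLOCK_REL_SIZE(target) instead of a fixed constant. This reading is an inference from the diff, not stated in it; schematically:

    /* Schematic of a relative exit: displacement fixed at translation
     * time, base PC taken at run time. */
    sh4r.pc += (target - pc);                     /* rather than: sh4r.pc = target; */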
20.1032 @@ -2313,8 +2377,9 @@
20.1033 if( sh4_x86.in_delay_slot ) {
20.1034 SLOTILLEGAL();
20.1035 } else {
20.1036 - JT_rel8( EXIT_BLOCK_SIZE, nottaken );
20.1037 - exit_block( disp + pc + 4, pc+2 );
20.1038 + sh4vma_t target = disp + pc + 4;
20.1039 + JT_rel8( EXIT_BLOCK_REL_SIZE(target), nottaken );
20.1040 + exit_block_rel(target, pc+2 );
20.1041 JMP_TARGET(nottaken);
20.1042 return 2;
20.1043 }
20.1044 @@ -2333,7 +2398,7 @@
20.1045 }
20.1046 OP(0x0F); OP(0x80+(sh4_x86.tstate^1)); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // JE rel32
20.1047 sh4_translate_instruction(pc+2);
20.1048 - exit_block( disp + pc + 4, pc+4 );
20.1049 + exit_block_rel( disp + pc + 4, pc+4 );
20.1050 // not taken
20.1051 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
20.1052 sh4_translate_instruction(pc+2);
20.1053 @@ -2347,6 +2412,7 @@
20.1054 if( sh4_x86.in_delay_slot ) {
20.1055 SLOTILLEGAL();
20.1056 } else {
20.1057 + sh4vma_t target = disp + pc + 4;
20.1058 sh4_x86.in_delay_slot = TRUE;
20.1059 if( sh4_x86.tstate == TSTATE_NONE ) {
20.1060 CMP_imm8s_sh4r( 1, R_T );
20.1061 @@ -2354,7 +2420,7 @@
20.1062 }
20.1063 OP(0x0F); OP(0x80+sh4_x86.tstate); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // JNE rel32
20.1064 sh4_translate_instruction(pc+2);
20.1065 - exit_block( disp + pc + 4, pc+4 );
20.1066 + exit_block_rel( target, pc+4 );
20.1067 // not taken
20.1068 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
20.1069 sh4_translate_instruction(pc+2);
20.1070 @@ -2380,9 +2446,10 @@
20.1071 MOV_moff32_EAX( ptr );
20.1072 MOVSX_r16_r32( R_EAX, R_EAX );
20.1073 } else {
20.1074 - load_imm32( R_ECX, (pc - sh4_x86.block_start_pc) + disp + 4 );
20.1075 - ADD_sh4r_r32( R_PC, R_ECX );
20.1076 - MEM_READ_WORD( R_ECX, R_EAX );
20.1077 + load_imm32( R_EAX, (pc - sh4_x86.block_start_pc) + disp + 4 );
20.1078 + ADD_sh4r_r32( R_PC, R_EAX );
20.1079 + MMU_TRANSLATE_READ( R_EAX );
20.1080 + MEM_READ_WORD( R_EAX, R_EAX );
20.1081 sh4_x86.tstate = TSTATE_NONE;
20.1082 }
20.1083 store_reg( R_EAX, Rn );
20.1084 @@ -2397,7 +2464,7 @@
20.1085 } else {
20.1086 sh4_x86.in_delay_slot = TRUE;
20.1087 sh4_translate_instruction( pc + 2 );
20.1088 - exit_block( disp + pc + 4, pc+4 );
20.1089 + exit_block_rel( disp + pc + 4, pc+4 );
20.1090 sh4_x86.branch_taken = TRUE;
20.1091 return 4;
20.1092 }
20.1093 @@ -2413,7 +2480,7 @@
20.1094 store_spreg( R_EAX, R_PR );
20.1095 sh4_x86.in_delay_slot = TRUE;
20.1096 sh4_translate_instruction( pc + 2 );
20.1097 - exit_block( disp + pc + 4, pc+4 );
20.1098 + exit_block_rel( disp + pc + 4, pc+4 );
20.1099 sh4_x86.branch_taken = TRUE;
20.1100 return 4;
20.1101 }
20.1102 @@ -2424,32 +2491,35 @@
20.1103 case 0x0:
20.1104 { /* MOV.B R0, @(disp, GBR) */
20.1105 uint32_t disp = (ir&0xFF);
20.1106 - load_reg( R_EAX, 0 );
20.1107 - load_spreg( R_ECX, R_GBR );
20.1108 - ADD_imm32_r32( disp, R_ECX );
20.1109 - MEM_WRITE_BYTE( R_ECX, R_EAX );
20.1110 + load_spreg( R_EAX, R_GBR );
20.1111 + ADD_imm32_r32( disp, R_EAX );
20.1112 + MMU_TRANSLATE_WRITE( R_EAX );
20.1113 + load_reg( R_EDX, 0 );
20.1114 + MEM_WRITE_BYTE( R_EAX, R_EDX );
20.1115 sh4_x86.tstate = TSTATE_NONE;
20.1116 }
20.1117 break;
20.1118 case 0x1:
20.1119 { /* MOV.W R0, @(disp, GBR) */
20.1120 uint32_t disp = (ir&0xFF)<<1;
20.1121 - load_spreg( R_ECX, R_GBR );
20.1122 - load_reg( R_EAX, 0 );
20.1123 - ADD_imm32_r32( disp, R_ECX );
20.1124 - check_walign16( R_ECX );
20.1125 - MEM_WRITE_WORD( R_ECX, R_EAX );
20.1126 + load_spreg( R_EAX, R_GBR );
20.1127 + ADD_imm32_r32( disp, R_EAX );
20.1128 + check_walign16( R_EAX );
20.1129 + MMU_TRANSLATE_WRITE( R_EAX );
20.1130 + load_reg( R_EDX, 0 );
20.1131 + MEM_WRITE_WORD( R_EAX, R_EDX );
20.1132 sh4_x86.tstate = TSTATE_NONE;
20.1133 }
20.1134 break;
20.1135 case 0x2:
20.1136 { /* MOV.L R0, @(disp, GBR) */
20.1137 uint32_t disp = (ir&0xFF)<<2;
20.1138 - load_spreg( R_ECX, R_GBR );
20.1139 - load_reg( R_EAX, 0 );
20.1140 - ADD_imm32_r32( disp, R_ECX );
20.1141 - check_walign32( R_ECX );
20.1142 - MEM_WRITE_LONG( R_ECX, R_EAX );
20.1143 + load_spreg( R_EAX, R_GBR );
20.1144 + ADD_imm32_r32( disp, R_EAX );
20.1145 + check_walign32( R_EAX );
20.1146 + MMU_TRANSLATE_WRITE( R_EAX );
20.1147 + load_reg( R_EDX, 0 );
20.1148 + MEM_WRITE_LONG( R_EAX, R_EDX );
20.1149 sh4_x86.tstate = TSTATE_NONE;
20.1150 }
20.1151 break;
20.1152 @@ -2473,9 +2543,10 @@
20.1153 case 0x4:
20.1154 { /* MOV.B @(disp, GBR), R0 */
20.1155 uint32_t disp = (ir&0xFF);
20.1156 - load_spreg( R_ECX, R_GBR );
20.1157 - ADD_imm32_r32( disp, R_ECX );
20.1158 - MEM_READ_BYTE( R_ECX, R_EAX );
20.1159 + load_spreg( R_EAX, R_GBR );
20.1160 + ADD_imm32_r32( disp, R_EAX );
20.1161 + MMU_TRANSLATE_READ( R_EAX );
20.1162 + MEM_READ_BYTE( R_EAX, R_EAX );
20.1163 store_reg( R_EAX, 0 );
20.1164 sh4_x86.tstate = TSTATE_NONE;
20.1165 }
20.1166 @@ -2483,10 +2554,11 @@
20.1167 case 0x5:
20.1168 { /* MOV.W @(disp, GBR), R0 */
20.1169 uint32_t disp = (ir&0xFF)<<1;
20.1170 - load_spreg( R_ECX, R_GBR );
20.1171 - ADD_imm32_r32( disp, R_ECX );
20.1172 - check_ralign16( R_ECX );
20.1173 - MEM_READ_WORD( R_ECX, R_EAX );
20.1174 + load_spreg( R_EAX, R_GBR );
20.1175 + ADD_imm32_r32( disp, R_EAX );
20.1176 + check_ralign16( R_EAX );
20.1177 + MMU_TRANSLATE_READ( R_EAX );
20.1178 + MEM_READ_WORD( R_EAX, R_EAX );
20.1179 store_reg( R_EAX, 0 );
20.1180 sh4_x86.tstate = TSTATE_NONE;
20.1181 }
20.1182 @@ -2494,10 +2566,11 @@
20.1183 case 0x6:
20.1184 { /* MOV.L @(disp, GBR), R0 */
20.1185 uint32_t disp = (ir&0xFF)<<2;
20.1186 - load_spreg( R_ECX, R_GBR );
20.1187 - ADD_imm32_r32( disp, R_ECX );
20.1188 - check_ralign32( R_ECX );
20.1189 - MEM_READ_LONG( R_ECX, R_EAX );
20.1190 + load_spreg( R_EAX, R_GBR );
20.1191 + ADD_imm32_r32( disp, R_EAX );
20.1192 + check_ralign32( R_EAX );
20.1193 + MMU_TRANSLATE_READ( R_EAX );
20.1194 + MEM_READ_LONG( R_EAX, R_EAX );
20.1195 store_reg( R_EAX, 0 );
20.1196 sh4_x86.tstate = TSTATE_NONE;
20.1197 }
20.1198 @@ -2511,6 +2584,7 @@
20.1199 load_imm32( R_ECX, (pc - sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
20.1200 ADD_sh4r_r32( R_PC, R_ECX );
20.1201 store_reg( R_ECX, 0 );
20.1202 + sh4_x86.tstate = TSTATE_NONE;
20.1203 }
20.1204 }
20.1205 break;
20.1206 @@ -2555,8 +2629,9 @@
20.1207 uint32_t imm = (ir&0xFF);
20.1208 load_reg( R_EAX, 0);
20.1209 load_reg( R_ECX, R_GBR);
20.1210 - ADD_r32_r32( R_EAX, R_ECX );
20.1211 - MEM_READ_BYTE( R_ECX, R_EAX );
20.1212 + ADD_r32_r32( R_ECX, R_EAX );
20.1213 + MMU_TRANSLATE_READ( R_EAX );
20.1214 + MEM_READ_BYTE( R_EAX, R_EAX );
20.1215 TEST_imm8_r8( imm, R_AL );
20.1216 SETE_t();
20.1217 sh4_x86.tstate = TSTATE_E;
20.1218 @@ -2567,9 +2642,10 @@
20.1219 uint32_t imm = (ir&0xFF);
20.1220 load_reg( R_EAX, 0 );
20.1221 load_spreg( R_ECX, R_GBR );
20.1222 - ADD_r32_r32( R_EAX, R_ECX );
20.1223 - PUSH_realigned_r32(R_ECX);
20.1224 - MEM_READ_BYTE( R_ECX, R_EAX );
20.1225 + ADD_r32_r32( R_ECX, R_EAX );
20.1226 + MMU_TRANSLATE_WRITE( R_EAX );
20.1227 + PUSH_realigned_r32(R_EAX);
20.1228 + MEM_READ_BYTE( R_EAX, R_EAX );
20.1229 POP_realigned_r32(R_ECX);
20.1230 AND_imm32_r32(imm, R_EAX );
20.1231 MEM_WRITE_BYTE( R_ECX, R_EAX );
20.1232 @@ -2581,9 +2657,10 @@
20.1233 uint32_t imm = (ir&0xFF);
20.1234 load_reg( R_EAX, 0 );
20.1235 load_spreg( R_ECX, R_GBR );
20.1236 - ADD_r32_r32( R_EAX, R_ECX );
20.1237 - PUSH_realigned_r32(R_ECX);
20.1238 - MEM_READ_BYTE(R_ECX, R_EAX);
20.1239 + ADD_r32_r32( R_ECX, R_EAX );
20.1240 + MMU_TRANSLATE_WRITE( R_EAX );
20.1241 + PUSH_realigned_r32(R_EAX);
20.1242 + MEM_READ_BYTE(R_EAX, R_EAX);
20.1243 POP_realigned_r32(R_ECX);
20.1244 XOR_imm32_r32( imm, R_EAX );
20.1245 MEM_WRITE_BYTE( R_ECX, R_EAX );
20.1246 @@ -2595,9 +2672,10 @@
20.1247 uint32_t imm = (ir&0xFF);
20.1248 load_reg( R_EAX, 0 );
20.1249 load_spreg( R_ECX, R_GBR );
20.1250 - ADD_r32_r32( R_EAX, R_ECX );
20.1251 - PUSH_realigned_r32(R_ECX);
20.1252 - MEM_READ_BYTE( R_ECX, R_EAX );
20.1253 + ADD_r32_r32( R_ECX, R_EAX );
20.1254 + MMU_TRANSLATE_WRITE( R_EAX );
20.1255 + PUSH_realigned_r32(R_EAX);
20.1256 + MEM_READ_BYTE( R_EAX, R_EAX );
20.1257 POP_realigned_r32(R_ECX);
20.1258 OR_imm32_r32(imm, R_EAX );
20.1259 MEM_WRITE_BYTE( R_ECX, R_EAX );
20.1260 @@ -2629,9 +2707,10 @@
20.1261 // Note: we use sh4r.pc for the calc as we could be running at a
20.1262 // different virtual address than the translation was done with,
20.1263 // but we can safely assume that the low bits are the same.
20.1264 - load_imm32( R_ECX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
20.1265 - ADD_sh4r_r32( R_PC, R_ECX );
20.1266 - MEM_READ_LONG( R_ECX, R_EAX );
20.1267 + load_imm32( R_EAX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
20.1268 + ADD_sh4r_r32( R_PC, R_EAX );
20.1269 + MMU_TRANSLATE_READ( R_EAX );
20.1270 + MEM_READ_LONG( R_EAX, R_EAX );
20.1271 sh4_x86.tstate = TSTATE_NONE;
20.1272 }
20.1273 store_reg( R_EAX, Rn );
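As the comment says, the displacement is kept relative to the block start so that the runtime PC in sh4r.pc supplies the base. The emitted sequence computes, in effect (the parenthesised expression is a constant baked in at translation time):

    /* Sketch of the MOV.L @(disp, PC), Rn address calculation. */
    uint32_t addr = sh4r.pc + ((pc - sh4_x86.block_start_pc) + disp + 4 - (pc & 0x03));
    if( sh4_x86.tlb_on )
        addr = mmu_vma_to_phys_read( addr );
    sh4r.r[Rn] = sh4_read_long( addr );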
20.1274 @@ -2781,31 +2860,32 @@
20.1275 { /* FMOV @(R0, Rm), FRn */
20.1276 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.1277 check_fpuen();
20.1278 - load_reg( R_ECX, Rm );
20.1279 - ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
20.1280 - check_ralign32( R_ECX );
20.1281 + load_reg( R_EAX, Rm );
20.1282 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
20.1283 + check_ralign32( R_EAX );
20.1284 + MMU_TRANSLATE_READ( R_EAX );
20.1285 load_spreg( R_EDX, R_FPSCR );
20.1286 TEST_imm32_r32( FPSCR_SZ, R_EDX );
20.1287 JNE_rel8(8 + MEM_READ_SIZE, doublesize);
20.1288 - MEM_READ_LONG( R_ECX, R_EAX );
20.1289 + MEM_READ_LONG( R_EAX, R_EAX );
20.1290 load_fr_bank( R_EDX );
20.1291 store_fr( R_EDX, R_EAX, FRn );
20.1292 if( FRn&1 ) {
20.1293 JMP_rel8(21 + MEM_READ_DOUBLE_SIZE, end);
20.1294 JMP_TARGET(doublesize);
20.1295 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
20.1296 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
20.1297 load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
20.1298 load_xf_bank( R_EDX );
20.1299 - store_fr( R_EDX, R_EAX, FRn&0x0E );
20.1300 - store_fr( R_EDX, R_ECX, FRn|0x01 );
20.1301 + store_fr( R_EDX, R_ECX, FRn&0x0E );
20.1302 + store_fr( R_EDX, R_EAX, FRn|0x01 );
20.1303 JMP_TARGET(end);
20.1304 } else {
20.1305 JMP_rel8(9 + MEM_READ_DOUBLE_SIZE, end);
20.1306 JMP_TARGET(doublesize);
20.1307 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
20.1308 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
20.1309 load_fr_bank( R_EDX );
20.1310 - store_fr( R_EDX, R_EAX, FRn&0x0E );
20.1311 - store_fr( R_EDX, R_ECX, FRn|0x01 );
20.1312 + store_fr( R_EDX, R_ECX, FRn&0x0E );
20.1313 + store_fr( R_EDX, R_EAX, FRn|0x01 );
20.1314 JMP_TARGET(end);
20.1315 }
20.1316 sh4_x86.tstate = TSTATE_NONE;
20.1317 @@ -2815,30 +2895,31 @@
20.1318 { /* FMOV FRm, @(R0, Rn) */
20.1319 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
20.1320 check_fpuen();
20.1321 - load_reg( R_ECX, Rn );
20.1322 - ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
20.1323 - check_walign32( R_ECX );
20.1324 + load_reg( R_EAX, Rn );
20.1325 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
20.1326 + check_walign32( R_EAX );
20.1327 + MMU_TRANSLATE_WRITE( R_EAX );
20.1328 load_spreg( R_EDX, R_FPSCR );
20.1329 TEST_imm32_r32( FPSCR_SZ, R_EDX );
20.1330 JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
20.1331 load_fr_bank( R_EDX );
20.1332 - load_fr( R_EDX, R_EAX, FRm );
20.1333 - MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
20.1334 + load_fr( R_EDX, R_ECX, FRm );
20.1335 + MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
20.1336 if( FRm&1 ) {
20.1337 JMP_rel8( 18 + MEM_WRITE_DOUBLE_SIZE, end );
20.1338 JMP_TARGET(doublesize);
20.1339 load_xf_bank( R_EDX );
20.1340 - load_fr( R_EDX, R_EAX, FRm&0x0E );
20.1341 + load_fr( R_EDX, R_ECX, FRm&0x0E );
20.1342 load_fr( R_EDX, R_EDX, FRm|0x01 );
20.1343 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
20.1344 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
20.1345 JMP_TARGET(end);
20.1346 } else {
20.1347 JMP_rel8( 9 + MEM_WRITE_DOUBLE_SIZE, end );
20.1348 JMP_TARGET(doublesize);
20.1349 load_fr_bank( R_EDX );
20.1350 - load_fr( R_EDX, R_EAX, FRm&0x0E );
20.1351 + load_fr( R_EDX, R_ECX, FRm&0x0E );
20.1352 load_fr( R_EDX, R_EDX, FRm|0x01 );
20.1353 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
20.1354 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
20.1355 JMP_TARGET(end);
20.1356 }
20.1357 sh4_x86.tstate = TSTATE_NONE;
20.1358 @@ -2848,30 +2929,31 @@
20.1359 { /* FMOV @Rm, FRn */
20.1360 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.1361 check_fpuen();
20.1362 - load_reg( R_ECX, Rm );
20.1363 - check_ralign32( R_ECX );
20.1364 + load_reg( R_EAX, Rm );
20.1365 + check_ralign32( R_EAX );
20.1366 + MMU_TRANSLATE_READ( R_EAX );
20.1367 load_spreg( R_EDX, R_FPSCR );
20.1368 TEST_imm32_r32( FPSCR_SZ, R_EDX );
20.1369 JNE_rel8(8 + MEM_READ_SIZE, doublesize);
20.1370 - MEM_READ_LONG( R_ECX, R_EAX );
20.1371 + MEM_READ_LONG( R_EAX, R_EAX );
20.1372 load_fr_bank( R_EDX );
20.1373 store_fr( R_EDX, R_EAX, FRn );
20.1374 if( FRn&1 ) {
20.1375 JMP_rel8(21 + MEM_READ_DOUBLE_SIZE, end);
20.1376 JMP_TARGET(doublesize);
20.1377 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
20.1378 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
20.1379 load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
20.1380 load_xf_bank( R_EDX );
20.1381 - store_fr( R_EDX, R_EAX, FRn&0x0E );
20.1382 - store_fr( R_EDX, R_ECX, FRn|0x01 );
20.1383 + store_fr( R_EDX, R_ECX, FRn&0x0E );
20.1384 + store_fr( R_EDX, R_EAX, FRn|0x01 );
20.1385 JMP_TARGET(end);
20.1386 } else {
20.1387 JMP_rel8(9 + MEM_READ_DOUBLE_SIZE, end);
20.1388 JMP_TARGET(doublesize);
20.1389 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
20.1390 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
20.1391 load_fr_bank( R_EDX );
20.1392 - store_fr( R_EDX, R_EAX, FRn&0x0E );
20.1393 - store_fr( R_EDX, R_ECX, FRn|0x01 );
20.1394 + store_fr( R_EDX, R_ECX, FRn&0x0E );
20.1395 + store_fr( R_EDX, R_EAX, FRn|0x01 );
20.1396 JMP_TARGET(end);
20.1397 }
20.1398 sh4_x86.tstate = TSTATE_NONE;
20.1399 @@ -2881,36 +2963,33 @@
20.1400 { /* FMOV @Rm+, FRn */
20.1401 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
20.1402 check_fpuen();
20.1403 - load_reg( R_ECX, Rm );
20.1404 - check_ralign32( R_ECX );
20.1405 - MOV_r32_r32( R_ECX, R_EAX );
20.1406 + load_reg( R_EAX, Rm );
20.1407 + check_ralign32( R_EAX );
20.1408 + MMU_TRANSLATE_READ( R_EAX );
20.1409 load_spreg( R_EDX, R_FPSCR );
20.1410 TEST_imm32_r32( FPSCR_SZ, R_EDX );
20.1411 - JNE_rel8(14 + MEM_READ_SIZE, doublesize);
20.1412 - ADD_imm8s_r32( 4, R_EAX );
20.1413 - store_reg( R_EAX, Rm );
20.1414 - MEM_READ_LONG( R_ECX, R_EAX );
20.1415 + JNE_rel8(12 + MEM_READ_SIZE, doublesize);
20.1416 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
20.1417 + MEM_READ_LONG( R_EAX, R_EAX );
20.1418 load_fr_bank( R_EDX );
20.1419 store_fr( R_EDX, R_EAX, FRn );
20.1420 if( FRn&1 ) {
20.1421 - JMP_rel8(27 + MEM_READ_DOUBLE_SIZE, end);
20.1422 + JMP_rel8(25 + MEM_READ_DOUBLE_SIZE, end);
20.1423 JMP_TARGET(doublesize);
20.1424 - ADD_imm8s_r32( 8, R_EAX );
20.1425 - store_reg(R_EAX, Rm);
20.1426 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
20.1427 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
20.1428 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
20.1429 load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
20.1430 load_xf_bank( R_EDX );
20.1431 - store_fr( R_EDX, R_EAX, FRn&0x0E );
20.1432 - store_fr( R_EDX, R_ECX, FRn|0x01 );
20.1433 + store_fr( R_EDX, R_ECX, FRn&0x0E );
20.1434 + store_fr( R_EDX, R_EAX, FRn|0x01 );
20.1435 JMP_TARGET(end);
20.1436 } else {
20.1437 - JMP_rel8(15 + MEM_READ_DOUBLE_SIZE, end);
20.1438 - ADD_imm8s_r32( 8, R_EAX );
20.1439 - store_reg(R_EAX, Rm);
20.1440 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
20.1441 + JMP_rel8(13 + MEM_READ_DOUBLE_SIZE, end);
20.1442 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
20.1443 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
20.1444 load_fr_bank( R_EDX );
20.1445 - store_fr( R_EDX, R_EAX, FRn&0x0E );
20.1446 - store_fr( R_EDX, R_ECX, FRn|0x01 );
20.1447 + store_fr( R_EDX, R_ECX, FRn&0x0E );
20.1448 + store_fr( R_EDX, R_EAX, FRn|0x01 );
20.1449 JMP_TARGET(end);
20.1450 }
20.1451 sh4_x86.tstate = TSTATE_NONE;
20.1452 @@ -2920,29 +2999,30 @@
20.1453 { /* FMOV FRm, @Rn */
20.1454 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
20.1455 check_fpuen();
20.1456 - load_reg( R_ECX, Rn );
20.1457 - check_walign32( R_ECX );
20.1458 + load_reg( R_EAX, Rn );
20.1459 + check_walign32( R_EAX );
20.1460 + MMU_TRANSLATE_WRITE( R_EAX );
20.1461 load_spreg( R_EDX, R_FPSCR );
20.1462 TEST_imm32_r32( FPSCR_SZ, R_EDX );
20.1463 JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
20.1464 load_fr_bank( R_EDX );
20.1465 - load_fr( R_EDX, R_EAX, FRm );
20.1466 - MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
20.1467 + load_fr( R_EDX, R_ECX, FRm );
20.1468 + MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
20.1469 if( FRm&1 ) {
20.1470 JMP_rel8( 18 + MEM_WRITE_DOUBLE_SIZE, end );
20.1471 JMP_TARGET(doublesize);
20.1472 load_xf_bank( R_EDX );
20.1473 - load_fr( R_EDX, R_EAX, FRm&0x0E );
20.1474 + load_fr( R_EDX, R_ECX, FRm&0x0E );
20.1475 load_fr( R_EDX, R_EDX, FRm|0x01 );
20.1476 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
20.1477 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
20.1478 JMP_TARGET(end);
20.1479 } else {
20.1480 JMP_rel8( 9 + MEM_WRITE_DOUBLE_SIZE, end );
20.1481 JMP_TARGET(doublesize);
20.1482 load_fr_bank( R_EDX );
20.1483 - load_fr( R_EDX, R_EAX, FRm&0x0E );
20.1484 + load_fr( R_EDX, R_ECX, FRm&0x0E );
20.1485 load_fr( R_EDX, R_EDX, FRm|0x01 );
20.1486 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
20.1487 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
20.1488 JMP_TARGET(end);
20.1489 }
20.1490 sh4_x86.tstate = TSTATE_NONE;
20.1491 @@ -2952,35 +3032,38 @@
20.1492 { /* FMOV FRm, @-Rn */
20.1493 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
20.1494 check_fpuen();
20.1495 - load_reg( R_ECX, Rn );
20.1496 - check_walign32( R_ECX );
20.1497 + load_reg( R_EAX, Rn );
20.1498 + check_walign32( R_EAX );
20.1499 load_spreg( R_EDX, R_FPSCR );
20.1500 TEST_imm32_r32( FPSCR_SZ, R_EDX );
20.1501 - JNE_rel8(14 + MEM_WRITE_SIZE, doublesize);
20.1502 + JNE_rel8(15 + MEM_WRITE_SIZE + MMU_TRANSLATE_SIZE, doublesize);
20.1503 + ADD_imm8s_r32( -4, R_EAX );
20.1504 + MMU_TRANSLATE_WRITE( R_EAX );
20.1505 load_fr_bank( R_EDX );
20.1506 - load_fr( R_EDX, R_EAX, FRm );
20.1507 - ADD_imm8s_r32(-4,R_ECX);
20.1508 - store_reg( R_ECX, Rn );
20.1509 - MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
20.1510 + load_fr( R_EDX, R_ECX, FRm );
20.1511 + ADD_imm8s_sh4r(-4,REG_OFFSET(r[Rn]));
20.1512 + MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
20.1513 if( FRm&1 ) {
20.1514 - JMP_rel8( 24 + MEM_WRITE_DOUBLE_SIZE, end );
20.1515 + JMP_rel8( 25 + MEM_WRITE_DOUBLE_SIZE + MMU_TRANSLATE_SIZE, end );
20.1516 JMP_TARGET(doublesize);
20.1517 + ADD_imm8s_r32(-8,R_EAX);
20.1518 + MMU_TRANSLATE_WRITE( R_EAX );
20.1519 load_xf_bank( R_EDX );
20.1520 - load_fr( R_EDX, R_EAX, FRm&0x0E );
20.1521 + load_fr( R_EDX, R_ECX, FRm&0x0E );
20.1522 load_fr( R_EDX, R_EDX, FRm|0x01 );
20.1523 - ADD_imm8s_r32(-8,R_ECX);
20.1524 - store_reg( R_ECX, Rn );
20.1525 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
20.1526 + ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
20.1527 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
20.1528 JMP_TARGET(end);
20.1529 } else {
20.1530 - JMP_rel8( 15 + MEM_WRITE_DOUBLE_SIZE, end );
20.1531 + JMP_rel8( 16 + MEM_WRITE_DOUBLE_SIZE + MMU_TRANSLATE_SIZE, end );
20.1532 JMP_TARGET(doublesize);
20.1533 + ADD_imm8s_r32(-8,R_EAX);
20.1534 + MMU_TRANSLATE_WRITE( R_EAX );
20.1535 load_fr_bank( R_EDX );
20.1536 - load_fr( R_EDX, R_EAX, FRm&0x0E );
20.1537 + load_fr( R_EDX, R_ECX, FRm&0x0E );
20.1538 load_fr( R_EDX, R_EDX, FRm|0x01 );
20.1539 - ADD_imm8s_r32(-8,R_ECX);
20.1540 - store_reg( R_ECX, Rn );
20.1541 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
20.1542 + ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
20.1543 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
20.1544 JMP_TARGET(end);
20.1545 }
20.1546 sh4_x86.tstate = TSTATE_NONE;
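In FMOV FRm, @-Rn the predecrement amount depends on FPSCR.SZ, so the address can only be translated inside each size branch; hence MMU_TRANSLATE_SIZE (0 with the TLB off) is folded into the rel8 skip distances above. The double-precision path behaves roughly as below; fr_bank[] stands for the raw 32-bit register images reached via load_xf_bank/load_fr:

    /* Sketch of the FPSCR.SZ=1 path of FMOV FRm, @-Rn. */
    uint32_t addr = sh4r.r[Rn] - 8;               /* size known only at runtime */
    if( sh4_x86.tlb_on )
        addr = mmu_vma_to_phys_write( addr );     /* may raise; Rn still unmodified */
    sh4r.r[Rn] -= 8;                              /* committed only after translation */
    sh4_write_long( addr,     fr_bank[FRm & 0x0E] );
    sh4_write_long( addr + 4, fr_bank[FRm | 0x01] );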
21.1 --- a/src/sh4/sh4x86.in Sun Jan 06 12:24:18 2008 +0000
21.2 +++ b/src/sh4/sh4x86.in Thu Jan 10 08:28:37 2008 +0000
21.3 @@ -40,6 +40,8 @@
21.4 uint32_t exc_code;
21.5 };
21.6
21.7 +#define MAX_RECOVERY_SIZE 2048
21.8 +
21.9 /**
21.10 * Struct to manage internal translation state. This state is not saved -
21.11 * it is only valid between calls to sh4_translate_begin_block() and
21.12 @@ -61,6 +63,8 @@
21.13 struct backpatch_record *backpatch_list;
21.14 uint32_t backpatch_posn;
21.15 uint32_t backpatch_size;
21.16 + struct xlat_recovery_record recovery_list[MAX_RECOVERY_SIZE];
21.17 + uint32_t recovery_posn;
21.18 };
21.19
21.20 #define TSTATE_NONE -1
21.21 @@ -115,6 +119,13 @@
21.22 sh4_x86.backpatch_posn++;
21.23 }
21.24
21.25 +void sh4_x86_add_recovery( uint32_t pc )
21.26 +{
21.27 + xlat_recovery[xlat_recovery_posn].xlat_pc = (uintptr_t)xlat_output;
21.28 + xlat_recovery[xlat_recovery_posn].sh4_icount = (pc - sh4_x86.block_start_pc)>>1;
21.29 + xlat_recovery_posn++;
21.30 +}
21.31 +
21.32 /**
21.33 * Emit an instruction to load an SH4 reg into a real register
21.34 */
21.35 @@ -309,34 +320,27 @@
21.36
21.37 #define UNDEF()
21.38 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
21.39 -#define MEM_READ_BYTE_PHYS( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
21.40 -#define MEM_READ_WORD_PHYS( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
21.41 -#define MEM_READ_LONG_PHYS( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
21.42 -#define MEM_WRITE_BYTE_PHYS( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
21.43 -#define MEM_WRITE_WORD_PHYS( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
21.44 -#define MEM_WRITE_LONG_PHYS( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
21.45 +#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
21.46 +#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
21.47 +#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
21.48 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
21.49 +#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
21.50 +#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
21.51
21.52 -#define MEM_READ_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_byte, R_EAX); MEM_RESULT(value_reg)
21.53 -#define MEM_READ_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_word, R_EAX); MEM_RESULT(value_reg)
21.54 -#define MEM_READ_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_long, R_EAX); MEM_RESULT(value_reg)
21.55 -#define MEM_WRITE_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_byte, R_EAX, value_reg)
21.56 -#define MEM_WRITE_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_word, R_EAX, value_reg)
21.57 -#define MEM_WRITE_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_long, R_EAX, value_reg)
21.58 +/**
21.59 + * Perform MMU translation on the address in addr_reg for a read operation, iff the TLB is turned
21.60 + * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
21.61 + */
21.62 +#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
21.63 +/**
21.64 + * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
21.65 + * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
21.66 + */
21.67 +#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
21.68
21.69 -#define MEM_READ_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_BYTE_VMA(addr_reg,value_reg);}else{MEM_READ_BYTE_PHYS(addr_reg, value_reg);}
21.70 -#define MEM_READ_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_WORD_VMA(addr_reg,value_reg);}else{MEM_READ_WORD_PHYS(addr_reg, value_reg);}
21.71 -#define MEM_READ_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_LONG_VMA(addr_reg,value_reg);}else{MEM_READ_LONG_PHYS(addr_reg, value_reg);}
21.72 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_BYTE_VMA(addr_reg,value_reg);}else{MEM_WRITE_BYTE_PHYS(addr_reg, value_reg);}
21.73 -#define MEM_WRITE_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_WORD_VMA(addr_reg,value_reg);}else{MEM_WRITE_WORD_PHYS(addr_reg, value_reg);}
21.74 -#define MEM_WRITE_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_LONG_VMA(addr_reg,value_reg);}else{MEM_WRITE_LONG_PHYS(addr_reg, value_reg);}
21.75 -
21.76 -#define MEM_READ_SIZE_PHYS (CALL_FUNC1_SIZE)
21.77 -#define MEM_WRITE_SIZE_PHYS (CALL_FUNC2_SIZE)
21.78 -#define MEM_READ_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC1_SIZE + 12)
21.79 -#define MEM_WRITE_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC2_SIZE + 12)
21.80 -
21.81 -#define MEM_READ_SIZE (sh4_x86.tlb_on?MEM_READ_SIZE_VMA:MEM_READ_SIZE_PHYS)
21.82 -#define MEM_WRITE_SIZE (sh4_x86.tlb_on?MEM_WRITE_SIZE_VMA:MEM_WRITE_SIZE_PHYS)
21.83 +#define MEM_READ_SIZE (CALL_FUNC1_SIZE)
21.84 +#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE)
21.85 +#define MMU_TRANSLATE_SIZE (sh4_x86.tlb_on ? (CALL_FUNC1_SIZE + 12) : 0 )
21.86
21.87 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
21.88
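[Editor's note] The two MMU_TRANSLATE_* macros above conditionally emit a translation step only when the TLB is enabled at translation time. At runtime the emitted code behaves roughly like the sketch below; only mmu_vma_to_phys_read(), sh4_read_long() and MMU_VMA_ERROR are names from the diff, the wrapper itself is hypothetical.

    static uint32_t sketch_read_long( sh4vma_t vma, int tlb_on )
    {
        sh4addr_t addr = vma;
        if( tlb_on ) {                       /* sh4_x86.tlb_on, fixed per block */
            addr = mmu_vma_to_phys_read( vma );
            if( addr == MMU_VMA_ERROR )
                return 0;                    /* real code branches to the exception exit */
        }
        return sh4_read_long( addr );        /* plain physical access */
    }

MMU_TRANSLATE_SIZE correspondingly evaluates to zero when the TLB is off, which keeps the hand-counted JNE_rel8/JMP_rel8 offsets in the FMOV cases valid in both modes.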
21.89 @@ -369,6 +373,9 @@
21.90 } else {
21.91 ir = sh4_read_word(pc);
21.92 }
21.93 + if( !sh4_x86.in_delay_slot ) {
21.94 + sh4_x86_add_recovery(pc);
21.95 + }
21.96 %%
21.97 /* ALU operations */
21.98 ADD Rm, Rn {:
21.99 @@ -419,9 +426,10 @@
21.100 AND.B #imm, @(R0, GBR) {:
21.101 load_reg( R_EAX, 0 );
21.102 load_spreg( R_ECX, R_GBR );
21.103 - ADD_r32_r32( R_EAX, R_ECX );
21.104 - PUSH_realigned_r32(R_ECX);
21.105 - MEM_READ_BYTE( R_ECX, R_EAX );
21.106 + ADD_r32_r32( R_ECX, R_EAX );
21.107 + MMU_TRANSLATE_WRITE( R_EAX );
21.108 + PUSH_realigned_r32(R_EAX);
21.109 + MEM_READ_BYTE( R_EAX, R_EAX );
21.110 POP_realigned_r32(R_ECX);
21.111 AND_imm32_r32(imm, R_EAX );
21.112 MEM_WRITE_BYTE( R_ECX, R_EAX );
21.113 @@ -584,18 +592,35 @@
21.114 MOVZX_r16_r32( R_EAX, R_EAX );
21.115 store_reg( R_EAX, Rn );
21.116 :}
21.117 -MAC.L @Rm+, @Rn+ {:
21.118 - load_reg( R_ECX, Rm );
21.119 - check_ralign32( R_ECX );
21.120 - load_reg( R_ECX, Rn );
21.121 - check_ralign32( R_ECX );
21.122 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
21.123 - MEM_READ_LONG( R_ECX, R_EAX );
21.124 - PUSH_realigned_r32( R_EAX );
21.125 - load_reg( R_ECX, Rm );
21.126 - ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.127 +MAC.L @Rm+, @Rn+ {:
21.128 + if( Rm == Rn ) {
21.129 + load_reg( R_EAX, Rm );
21.130 + check_ralign32( R_EAX );
21.131 + MMU_TRANSLATE_READ( R_EAX );
21.132 + PUSH_realigned_r32( R_EAX );
21.133 + load_reg( R_EAX, Rn );
21.134 + ADD_imm8s_r32( 4, R_EAX );
21.135 + MMU_TRANSLATE_READ( R_EAX );
21.136 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
21.137 + // Note: translate both addresses, since they may fall on different pages.
21.138 + // It may be worth adding a page-boundary check to skip the second translation.
21.139 + } else {
21.140 + load_reg( R_EAX, Rm );
21.141 + check_ralign32( R_EAX );
21.142 + MMU_TRANSLATE_READ( R_EAX );
21.143 + PUSH_realigned_r32( R_EAX );
21.144 + load_reg( R_EAX, Rn );
21.145 + check_ralign32( R_EAX );
21.146 + MMU_TRANSLATE_READ( R_EAX );
21.147 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
21.148 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.149 + }
21.150 + MEM_READ_LONG( R_EAX, R_EAX );
21.151 + POP_r32( R_ECX );
21.152 + PUSH_r32( R_EAX );
21.153 MEM_READ_LONG( R_ECX, R_EAX );
21.154 POP_realigned_r32( R_ECX );
21.155 +
21.156 IMUL_r32( R_ECX );
21.157 ADD_r32_sh4r( R_EAX, R_MACL );
21.158 ADC_r32_sh4r( R_EDX, R_MACH );
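[Editor's note] On the page-boundary comment above: even when Rm == Rn makes the two reads adjacent, adjacency does not imply the same page, so both addresses are translated. A sketch of the skip-check the comment contemplates, assuming 4K pages purely for illustration (the SH4 TLB supports several page sizes):

    static int sketch_crosses_page( uint32_t addr, uint32_t access_size )
    {
        /* true if [addr, addr + access_size) straddles a 4K boundary */
        return ((addr & 0xFFFu) + access_size) > 0x1000u;
    }

When this returns false, the second mmu_vma_to_phys_read call could be skipped and the first translation reused with the offset applied.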
21.159 @@ -608,15 +633,31 @@
21.160 sh4_x86.tstate = TSTATE_NONE;
21.161 :}
21.162 MAC.W @Rm+, @Rn+ {:
21.163 - load_reg( R_ECX, Rm );
21.164 - check_ralign16( R_ECX );
21.165 - load_reg( R_ECX, Rn );
21.166 - check_ralign16( R_ECX );
21.167 - ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
21.168 - MEM_READ_WORD( R_ECX, R_EAX );
21.169 - PUSH_realigned_r32( R_EAX );
21.170 - load_reg( R_ECX, Rm );
21.171 - ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
21.172 + if( Rm == Rn ) {
21.173 + load_reg( R_EAX, Rm );
21.174 + check_ralign16( R_EAX );
21.175 + MMU_TRANSLATE_READ( R_EAX );
21.176 + PUSH_realigned_r32( R_EAX );
21.177 + load_reg( R_EAX, Rn );
21.178 + ADD_imm8s_r32( 2, R_EAX );
21.179 + MMU_TRANSLATE_READ( R_EAX );
21.180 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
21.182 + // Note: translate both addresses, since they may fall on different pages.
21.183 + // It may be worth adding a page-boundary check to skip the second translation.
21.183 + } else {
21.184 + load_reg( R_EAX, Rm );
21.185 + check_ralign16( R_EAX );
21.186 + MMU_TRANSLATE_READ( R_EAX );
21.187 + PUSH_realigned_r32( R_EAX );
21.188 + load_reg( R_EAX, Rn );
21.189 + check_ralign16( R_EAX );
21.190 + MMU_TRANSLATE_READ( R_EAX );
21.191 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
21.192 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
21.193 + }
21.194 + MEM_READ_WORD( R_EAX, R_EAX );
21.195 + POP_r32( R_ECX );
21.196 + PUSH_r32( R_EAX );
21.197 MEM_READ_WORD( R_ECX, R_EAX );
21.198 POP_realigned_r32( R_ECX );
21.199 IMUL_r32( R_ECX );
21.200 @@ -709,9 +750,10 @@
21.201 OR.B #imm, @(R0, GBR) {:
21.202 load_reg( R_EAX, 0 );
21.203 load_spreg( R_ECX, R_GBR );
21.204 - ADD_r32_r32( R_EAX, R_ECX );
21.205 - PUSH_realigned_r32(R_ECX);
21.206 - MEM_READ_BYTE( R_ECX, R_EAX );
21.207 + ADD_r32_r32( R_ECX, R_EAX );
21.208 + MMU_TRANSLATE_WRITE( R_EAX );
21.209 + PUSH_realigned_r32(R_EAX);
21.210 + MEM_READ_BYTE( R_EAX, R_EAX );
21.211 POP_realigned_r32(R_ECX);
21.212 OR_imm32_r32(imm, R_EAX );
21.213 MEM_WRITE_BYTE( R_ECX, R_EAX );
21.214 @@ -905,12 +947,14 @@
21.215 sh4_x86.tstate = TSTATE_NONE;
21.216 :}
21.217 TAS.B @Rn {:
21.218 - load_reg( R_ECX, Rn );
21.219 - MEM_READ_BYTE( R_ECX, R_EAX );
21.220 + load_reg( R_EAX, Rn );
21.221 + MMU_TRANSLATE_WRITE( R_EAX );
21.222 + PUSH_realigned_r32( R_EAX );
21.223 + MEM_READ_BYTE( R_EAX, R_EAX );
21.224 TEST_r8_r8( R_AL, R_AL );
21.225 SETE_t();
21.226 OR_imm8_r8( 0x80, R_AL );
21.227 - load_reg( R_ECX, Rn );
21.228 + POP_realigned_r32( R_ECX );
21.229 MEM_WRITE_BYTE( R_ECX, R_EAX );
21.230 sh4_x86.tstate = TSTATE_NONE;
21.231 :}
21.232 @@ -930,8 +974,9 @@
21.233 TST.B #imm, @(R0, GBR) {:
21.234 load_reg( R_EAX, 0);
21.235 load_reg( R_ECX, R_GBR);
21.236 - ADD_r32_r32( R_EAX, R_ECX );
21.237 - MEM_READ_BYTE( R_ECX, R_EAX );
21.238 + ADD_r32_r32( R_ECX, R_EAX );
21.239 + MMU_TRANSLATE_READ( R_EAX );
21.240 + MEM_READ_BYTE( R_EAX, R_EAX );
21.241 TEST_imm8_r8( imm, R_AL );
21.242 SETE_t();
21.243 sh4_x86.tstate = TSTATE_E;
21.244 @@ -952,9 +997,10 @@
21.245 XOR.B #imm, @(R0, GBR) {:
21.246 load_reg( R_EAX, 0 );
21.247 load_spreg( R_ECX, R_GBR );
21.248 - ADD_r32_r32( R_EAX, R_ECX );
21.249 - PUSH_realigned_r32(R_ECX);
21.250 - MEM_READ_BYTE(R_ECX, R_EAX);
21.251 + ADD_r32_r32( R_ECX, R_EAX );
21.252 + MMU_TRANSLATE_WRITE( R_EAX );
21.253 + PUSH_realigned_r32(R_EAX);
21.254 + MEM_READ_BYTE(R_EAX, R_EAX);
21.255 POP_realigned_r32(R_ECX);
21.256 XOR_imm32_r32( imm, R_EAX );
21.257 MEM_WRITE_BYTE( R_ECX, R_EAX );
21.258 @@ -980,150 +1026,165 @@
21.259 store_reg( R_EAX, Rn );
21.260 :}
21.261 MOV.B Rm, @Rn {:
21.262 - load_reg( R_EAX, Rm );
21.263 - load_reg( R_ECX, Rn );
21.264 - MEM_WRITE_BYTE( R_ECX, R_EAX );
21.265 + load_reg( R_EAX, Rn );
21.266 + MMU_TRANSLATE_WRITE( R_EAX );
21.267 + load_reg( R_EDX, Rm );
21.268 + MEM_WRITE_BYTE( R_EAX, R_EDX );
21.269 sh4_x86.tstate = TSTATE_NONE;
21.270 :}
21.271 MOV.B Rm, @-Rn {:
21.272 - load_reg( R_EAX, Rm );
21.273 - load_reg( R_ECX, Rn );
21.274 - ADD_imm8s_r32( -1, R_ECX );
21.275 - store_reg( R_ECX, Rn );
21.276 - MEM_WRITE_BYTE( R_ECX, R_EAX );
21.277 + load_reg( R_EAX, Rn );
21.278 + ADD_imm8s_r32( -1, R_EAX );
21.279 + MMU_TRANSLATE_WRITE( R_EAX );
21.280 + load_reg( R_EDX, Rm );
21.281 + ADD_imm8s_sh4r( -1, REG_OFFSET(r[Rn]) );
21.282 + MEM_WRITE_BYTE( R_EAX, R_EDX );
21.283 sh4_x86.tstate = TSTATE_NONE;
21.284 :}
21.285 MOV.B Rm, @(R0, Rn) {:
21.286 load_reg( R_EAX, 0 );
21.287 load_reg( R_ECX, Rn );
21.288 - ADD_r32_r32( R_EAX, R_ECX );
21.289 - load_reg( R_EAX, Rm );
21.290 - MEM_WRITE_BYTE( R_ECX, R_EAX );
21.291 + ADD_r32_r32( R_ECX, R_EAX );
21.292 + MMU_TRANSLATE_WRITE( R_EAX );
21.293 + load_reg( R_EDX, Rm );
21.294 + MEM_WRITE_BYTE( R_EAX, R_EDX );
21.295 sh4_x86.tstate = TSTATE_NONE;
21.296 :}
21.297 MOV.B R0, @(disp, GBR) {:
21.298 - load_reg( R_EAX, 0 );
21.299 - load_spreg( R_ECX, R_GBR );
21.300 - ADD_imm32_r32( disp, R_ECX );
21.301 - MEM_WRITE_BYTE( R_ECX, R_EAX );
21.302 + load_spreg( R_EAX, R_GBR );
21.303 + ADD_imm32_r32( disp, R_EAX );
21.304 + MMU_TRANSLATE_WRITE( R_EAX );
21.305 + load_reg( R_EDX, 0 );
21.306 + MEM_WRITE_BYTE( R_EAX, R_EDX );
21.307 sh4_x86.tstate = TSTATE_NONE;
21.308 :}
21.309 MOV.B R0, @(disp, Rn) {:
21.310 - load_reg( R_EAX, 0 );
21.311 - load_reg( R_ECX, Rn );
21.312 - ADD_imm32_r32( disp, R_ECX );
21.313 - MEM_WRITE_BYTE( R_ECX, R_EAX );
21.314 + load_reg( R_EAX, Rn );
21.315 + ADD_imm32_r32( disp, R_EAX );
21.316 + MMU_TRANSLATE_WRITE( R_EAX );
21.317 + load_reg( R_EDX, 0 );
21.318 + MEM_WRITE_BYTE( R_EAX, R_EDX );
21.319 sh4_x86.tstate = TSTATE_NONE;
21.320 :}
21.321 MOV.B @Rm, Rn {:
21.322 - load_reg( R_ECX, Rm );
21.323 - MEM_READ_BYTE( R_ECX, R_EAX );
21.324 + load_reg( R_EAX, Rm );
21.325 + MMU_TRANSLATE_READ( R_EAX );
21.326 + MEM_READ_BYTE( R_EAX, R_EAX );
21.327 store_reg( R_EAX, Rn );
21.328 sh4_x86.tstate = TSTATE_NONE;
21.329 :}
21.330 MOV.B @Rm+, Rn {:
21.331 - load_reg( R_ECX, Rm );
21.332 - MOV_r32_r32( R_ECX, R_EAX );
21.333 - ADD_imm8s_r32( 1, R_EAX );
21.334 - store_reg( R_EAX, Rm );
21.335 - MEM_READ_BYTE( R_ECX, R_EAX );
21.336 + load_reg( R_EAX, Rm );
21.337 + MMU_TRANSLATE_READ( R_EAX );
21.338 + ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
21.339 + MEM_READ_BYTE( R_EAX, R_EAX );
21.340 store_reg( R_EAX, Rn );
21.341 sh4_x86.tstate = TSTATE_NONE;
21.342 :}
21.343 MOV.B @(R0, Rm), Rn {:
21.344 load_reg( R_EAX, 0 );
21.345 load_reg( R_ECX, Rm );
21.346 - ADD_r32_r32( R_EAX, R_ECX );
21.347 - MEM_READ_BYTE( R_ECX, R_EAX );
21.348 + ADD_r32_r32( R_ECX, R_EAX );
21.349 +    MMU_TRANSLATE_READ( R_EAX );
21.350 + MEM_READ_BYTE( R_EAX, R_EAX );
21.351 store_reg( R_EAX, Rn );
21.352 sh4_x86.tstate = TSTATE_NONE;
21.353 :}
21.354 MOV.B @(disp, GBR), R0 {:
21.355 - load_spreg( R_ECX, R_GBR );
21.356 - ADD_imm32_r32( disp, R_ECX );
21.357 - MEM_READ_BYTE( R_ECX, R_EAX );
21.358 + load_spreg( R_EAX, R_GBR );
21.359 + ADD_imm32_r32( disp, R_EAX );
21.360 + MMU_TRANSLATE_READ( R_EAX );
21.361 + MEM_READ_BYTE( R_EAX, R_EAX );
21.362 store_reg( R_EAX, 0 );
21.363 sh4_x86.tstate = TSTATE_NONE;
21.364 :}
21.365 MOV.B @(disp, Rm), R0 {:
21.366 - load_reg( R_ECX, Rm );
21.367 - ADD_imm32_r32( disp, R_ECX );
21.368 - MEM_READ_BYTE( R_ECX, R_EAX );
21.369 + load_reg( R_EAX, Rm );
21.370 + ADD_imm32_r32( disp, R_EAX );
21.371 + MMU_TRANSLATE_READ( R_EAX );
21.372 + MEM_READ_BYTE( R_EAX, R_EAX );
21.373 store_reg( R_EAX, 0 );
21.374 sh4_x86.tstate = TSTATE_NONE;
21.375 :}
21.376 MOV.L Rm, @Rn {:
21.377 - load_reg( R_EAX, Rm );
21.378 - load_reg( R_ECX, Rn );
21.379 - check_walign32(R_ECX);
21.380 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.381 + load_reg( R_EAX, Rn );
21.382 + check_walign32(R_EAX);
21.383 + MMU_TRANSLATE_WRITE( R_EAX );
21.384 + load_reg( R_EDX, Rm );
21.385 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.386 sh4_x86.tstate = TSTATE_NONE;
21.387 :}
21.388 MOV.L Rm, @-Rn {:
21.389 - load_reg( R_EAX, Rm );
21.390 - load_reg( R_ECX, Rn );
21.391 - check_walign32( R_ECX );
21.392 - ADD_imm8s_r32( -4, R_ECX );
21.393 - store_reg( R_ECX, Rn );
21.394 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.395 + load_reg( R_EAX, Rn );
21.396 + ADD_imm8s_r32( -4, R_EAX );
21.397 + check_walign32( R_EAX );
21.398 + MMU_TRANSLATE_WRITE( R_EAX );
21.399 + load_reg( R_EDX, Rm );
21.400 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.401 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.402 sh4_x86.tstate = TSTATE_NONE;
21.403 :}
21.404 MOV.L Rm, @(R0, Rn) {:
21.405 load_reg( R_EAX, 0 );
21.406 load_reg( R_ECX, Rn );
21.407 - ADD_r32_r32( R_EAX, R_ECX );
21.408 - check_walign32( R_ECX );
21.409 - load_reg( R_EAX, Rm );
21.410 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.411 + ADD_r32_r32( R_ECX, R_EAX );
21.412 + check_walign32( R_EAX );
21.413 + MMU_TRANSLATE_WRITE( R_EAX );
21.414 + load_reg( R_EDX, Rm );
21.415 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.416 sh4_x86.tstate = TSTATE_NONE;
21.417 :}
21.418 MOV.L R0, @(disp, GBR) {:
21.419 - load_spreg( R_ECX, R_GBR );
21.420 - load_reg( R_EAX, 0 );
21.421 - ADD_imm32_r32( disp, R_ECX );
21.422 - check_walign32( R_ECX );
21.423 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.424 + load_spreg( R_EAX, R_GBR );
21.425 + ADD_imm32_r32( disp, R_EAX );
21.426 + check_walign32( R_EAX );
21.427 + MMU_TRANSLATE_WRITE( R_EAX );
21.428 + load_reg( R_EDX, 0 );
21.429 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.430 sh4_x86.tstate = TSTATE_NONE;
21.431 :}
21.432 MOV.L Rm, @(disp, Rn) {:
21.433 - load_reg( R_ECX, Rn );
21.434 - load_reg( R_EAX, Rm );
21.435 - ADD_imm32_r32( disp, R_ECX );
21.436 - check_walign32( R_ECX );
21.437 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.438 + load_reg( R_EAX, Rn );
21.439 + ADD_imm32_r32( disp, R_EAX );
21.440 + check_walign32( R_EAX );
21.441 + MMU_TRANSLATE_WRITE( R_EAX );
21.442 + load_reg( R_EDX, Rm );
21.443 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.444 sh4_x86.tstate = TSTATE_NONE;
21.445 :}
21.446 MOV.L @Rm, Rn {:
21.447 - load_reg( R_ECX, Rm );
21.448 - check_ralign32( R_ECX );
21.449 - MEM_READ_LONG( R_ECX, R_EAX );
21.450 + load_reg( R_EAX, Rm );
21.451 + check_ralign32( R_EAX );
21.452 + MMU_TRANSLATE_READ( R_EAX );
21.453 + MEM_READ_LONG( R_EAX, R_EAX );
21.454 store_reg( R_EAX, Rn );
21.455 sh4_x86.tstate = TSTATE_NONE;
21.456 :}
21.457 MOV.L @Rm+, Rn {:
21.458 load_reg( R_EAX, Rm );
21.459 check_ralign32( R_EAX );
21.460 - MOV_r32_r32( R_EAX, R_ECX );
21.461 - ADD_imm8s_r32( 4, R_EAX );
21.462 - store_reg( R_EAX, Rm );
21.463 - MEM_READ_LONG( R_ECX, R_EAX );
21.464 + MMU_TRANSLATE_READ( R_EAX );
21.465 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.466 + MEM_READ_LONG( R_EAX, R_EAX );
21.467 store_reg( R_EAX, Rn );
21.468 sh4_x86.tstate = TSTATE_NONE;
21.469 :}
21.470 MOV.L @(R0, Rm), Rn {:
21.471 load_reg( R_EAX, 0 );
21.472 load_reg( R_ECX, Rm );
21.473 - ADD_r32_r32( R_EAX, R_ECX );
21.474 - check_ralign32( R_ECX );
21.475 - MEM_READ_LONG( R_ECX, R_EAX );
21.476 + ADD_r32_r32( R_ECX, R_EAX );
21.477 + check_ralign32( R_EAX );
21.478 + MMU_TRANSLATE_READ( R_EAX );
21.479 + MEM_READ_LONG( R_EAX, R_EAX );
21.480 store_reg( R_EAX, Rn );
21.481 sh4_x86.tstate = TSTATE_NONE;
21.482 :}
21.483 MOV.L @(disp, GBR), R0 {:
21.484 - load_spreg( R_ECX, R_GBR );
21.485 - ADD_imm32_r32( disp, R_ECX );
21.486 - check_ralign32( R_ECX );
21.487 - MEM_READ_LONG( R_ECX, R_EAX );
21.488 + load_spreg( R_EAX, R_GBR );
21.489 + ADD_imm32_r32( disp, R_EAX );
21.490 + check_ralign32( R_EAX );
21.491 + MMU_TRANSLATE_READ( R_EAX );
21.492 + MEM_READ_LONG( R_EAX, R_EAX );
21.493 store_reg( R_EAX, 0 );
21.494 sh4_x86.tstate = TSTATE_NONE;
21.495 :}
21.496 @@ -1148,94 +1209,103 @@
21.497 // Note: we use sh4r.pc for the calc as we could be running at a
21.498 // different virtual address than the translation was done with,
21.499 // but we can safely assume that the low bits are the same.
21.500 - load_imm32( R_ECX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
21.501 - ADD_sh4r_r32( R_PC, R_ECX );
21.502 - MEM_READ_LONG( R_ECX, R_EAX );
21.503 + load_imm32( R_EAX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
21.504 + ADD_sh4r_r32( R_PC, R_EAX );
21.505 + MMU_TRANSLATE_READ( R_EAX );
21.506 + MEM_READ_LONG( R_EAX, R_EAX );
21.507 sh4_x86.tstate = TSTATE_NONE;
21.508 }
21.509 store_reg( R_EAX, Rn );
21.510 }
21.511 :}
21.512 MOV.L @(disp, Rm), Rn {:
21.513 - load_reg( R_ECX, Rm );
21.514 - ADD_imm8s_r32( disp, R_ECX );
21.515 - check_ralign32( R_ECX );
21.516 - MEM_READ_LONG( R_ECX, R_EAX );
21.517 + load_reg( R_EAX, Rm );
21.518 + ADD_imm8s_r32( disp, R_EAX );
21.519 + check_ralign32( R_EAX );
21.520 + MMU_TRANSLATE_READ( R_EAX );
21.521 + MEM_READ_LONG( R_EAX, R_EAX );
21.522 store_reg( R_EAX, Rn );
21.523 sh4_x86.tstate = TSTATE_NONE;
21.524 :}
21.525 MOV.W Rm, @Rn {:
21.526 - load_reg( R_ECX, Rn );
21.527 - check_walign16( R_ECX );
21.528 - load_reg( R_EAX, Rm );
21.529 - MEM_WRITE_WORD( R_ECX, R_EAX );
21.530 + load_reg( R_EAX, Rn );
21.531 + check_walign16( R_EAX );
21.532 +    MMU_TRANSLATE_WRITE( R_EAX );
21.533 + load_reg( R_EDX, Rm );
21.534 + MEM_WRITE_WORD( R_EAX, R_EDX );
21.535 sh4_x86.tstate = TSTATE_NONE;
21.536 :}
21.537 MOV.W Rm, @-Rn {:
21.538 - load_reg( R_ECX, Rn );
21.539 - check_walign16( R_ECX );
21.540 - load_reg( R_EAX, Rm );
21.541 - ADD_imm8s_r32( -2, R_ECX );
21.542 - store_reg( R_ECX, Rn );
21.543 - MEM_WRITE_WORD( R_ECX, R_EAX );
21.544 + load_reg( R_EAX, Rn );
21.545 + ADD_imm8s_r32( -2, R_EAX );
21.546 + check_walign16( R_EAX );
21.547 + MMU_TRANSLATE_WRITE( R_EAX );
21.548 + load_reg( R_EDX, Rm );
21.549 + ADD_imm8s_sh4r( -2, REG_OFFSET(r[Rn]) );
21.550 + MEM_WRITE_WORD( R_EAX, R_EDX );
21.551 sh4_x86.tstate = TSTATE_NONE;
21.552 :}
21.553 MOV.W Rm, @(R0, Rn) {:
21.554 load_reg( R_EAX, 0 );
21.555 load_reg( R_ECX, Rn );
21.556 - ADD_r32_r32( R_EAX, R_ECX );
21.557 - check_walign16( R_ECX );
21.558 - load_reg( R_EAX, Rm );
21.559 - MEM_WRITE_WORD( R_ECX, R_EAX );
21.560 + ADD_r32_r32( R_ECX, R_EAX );
21.561 + check_walign16( R_EAX );
21.562 + MMU_TRANSLATE_WRITE( R_EAX );
21.563 + load_reg( R_EDX, Rm );
21.564 + MEM_WRITE_WORD( R_EAX, R_EDX );
21.565 sh4_x86.tstate = TSTATE_NONE;
21.566 :}
21.567 MOV.W R0, @(disp, GBR) {:
21.568 - load_spreg( R_ECX, R_GBR );
21.569 - load_reg( R_EAX, 0 );
21.570 - ADD_imm32_r32( disp, R_ECX );
21.571 - check_walign16( R_ECX );
21.572 - MEM_WRITE_WORD( R_ECX, R_EAX );
21.573 + load_spreg( R_EAX, R_GBR );
21.574 + ADD_imm32_r32( disp, R_EAX );
21.575 + check_walign16( R_EAX );
21.576 + MMU_TRANSLATE_WRITE( R_EAX );
21.577 + load_reg( R_EDX, 0 );
21.578 + MEM_WRITE_WORD( R_EAX, R_EDX );
21.579 sh4_x86.tstate = TSTATE_NONE;
21.580 :}
21.581 MOV.W R0, @(disp, Rn) {:
21.582 - load_reg( R_ECX, Rn );
21.583 - load_reg( R_EAX, 0 );
21.584 - ADD_imm32_r32( disp, R_ECX );
21.585 - check_walign16( R_ECX );
21.586 - MEM_WRITE_WORD( R_ECX, R_EAX );
21.587 + load_reg( R_EAX, Rn );
21.588 + ADD_imm32_r32( disp, R_EAX );
21.589 + check_walign16( R_EAX );
21.590 + MMU_TRANSLATE_WRITE( R_EAX );
21.591 + load_reg( R_EDX, 0 );
21.592 + MEM_WRITE_WORD( R_EAX, R_EDX );
21.593 sh4_x86.tstate = TSTATE_NONE;
21.594 :}
21.595 MOV.W @Rm, Rn {:
21.596 - load_reg( R_ECX, Rm );
21.597 - check_ralign16( R_ECX );
21.598 - MEM_READ_WORD( R_ECX, R_EAX );
21.599 + load_reg( R_EAX, Rm );
21.600 + check_ralign16( R_EAX );
21.601 + MMU_TRANSLATE_READ( R_EAX );
21.602 + MEM_READ_WORD( R_EAX, R_EAX );
21.603 store_reg( R_EAX, Rn );
21.604 sh4_x86.tstate = TSTATE_NONE;
21.605 :}
21.606 MOV.W @Rm+, Rn {:
21.607 load_reg( R_EAX, Rm );
21.608 check_ralign16( R_EAX );
21.609 - MOV_r32_r32( R_EAX, R_ECX );
21.610 - ADD_imm8s_r32( 2, R_EAX );
21.611 - store_reg( R_EAX, Rm );
21.612 - MEM_READ_WORD( R_ECX, R_EAX );
21.613 + MMU_TRANSLATE_READ( R_EAX );
21.614 + ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
21.615 + MEM_READ_WORD( R_EAX, R_EAX );
21.616 store_reg( R_EAX, Rn );
21.617 sh4_x86.tstate = TSTATE_NONE;
21.618 :}
21.619 MOV.W @(R0, Rm), Rn {:
21.620 load_reg( R_EAX, 0 );
21.621 load_reg( R_ECX, Rm );
21.622 - ADD_r32_r32( R_EAX, R_ECX );
21.623 - check_ralign16( R_ECX );
21.624 - MEM_READ_WORD( R_ECX, R_EAX );
21.625 + ADD_r32_r32( R_ECX, R_EAX );
21.626 + check_ralign16( R_EAX );
21.627 + MMU_TRANSLATE_READ( R_EAX );
21.628 + MEM_READ_WORD( R_EAX, R_EAX );
21.629 store_reg( R_EAX, Rn );
21.630 sh4_x86.tstate = TSTATE_NONE;
21.631 :}
21.632 MOV.W @(disp, GBR), R0 {:
21.633 - load_spreg( R_ECX, R_GBR );
21.634 - ADD_imm32_r32( disp, R_ECX );
21.635 - check_ralign16( R_ECX );
21.636 - MEM_READ_WORD( R_ECX, R_EAX );
21.637 + load_spreg( R_EAX, R_GBR );
21.638 + ADD_imm32_r32( disp, R_EAX );
21.639 + check_ralign16( R_EAX );
21.640 + MMU_TRANSLATE_READ( R_EAX );
21.641 + MEM_READ_WORD( R_EAX, R_EAX );
21.642 store_reg( R_EAX, 0 );
21.643 sh4_x86.tstate = TSTATE_NONE;
21.644 :}
21.645 @@ -1250,19 +1320,21 @@
21.646 MOV_moff32_EAX( ptr );
21.647 MOVSX_r16_r32( R_EAX, R_EAX );
21.648 } else {
21.649 - load_imm32( R_ECX, (pc - sh4_x86.block_start_pc) + disp + 4 );
21.650 - ADD_sh4r_r32( R_PC, R_ECX );
21.651 - MEM_READ_WORD( R_ECX, R_EAX );
21.652 + load_imm32( R_EAX, (pc - sh4_x86.block_start_pc) + disp + 4 );
21.653 + ADD_sh4r_r32( R_PC, R_EAX );
21.654 + MMU_TRANSLATE_READ( R_EAX );
21.655 + MEM_READ_WORD( R_EAX, R_EAX );
21.656 sh4_x86.tstate = TSTATE_NONE;
21.657 }
21.658 store_reg( R_EAX, Rn );
21.659 }
21.660 :}
21.661 MOV.W @(disp, Rm), R0 {:
21.662 - load_reg( R_ECX, Rm );
21.663 - ADD_imm32_r32( disp, R_ECX );
21.664 - check_ralign16( R_ECX );
21.665 - MEM_READ_WORD( R_ECX, R_EAX );
21.666 + load_reg( R_EAX, Rm );
21.667 + ADD_imm32_r32( disp, R_EAX );
21.668 + check_ralign16( R_EAX );
21.669 + MMU_TRANSLATE_READ( R_EAX );
21.670 + MEM_READ_WORD( R_EAX, R_EAX );
21.671 store_reg( R_EAX, 0 );
21.672 sh4_x86.tstate = TSTATE_NONE;
21.673 :}
21.674 @@ -1273,13 +1345,15 @@
21.675 load_imm32( R_ECX, (pc - sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
21.676 ADD_sh4r_r32( R_PC, R_ECX );
21.677 store_reg( R_ECX, 0 );
21.678 + sh4_x86.tstate = TSTATE_NONE;
21.679 }
21.680 :}
21.681 MOVCA.L R0, @Rn {:
21.682 - load_reg( R_EAX, 0 );
21.683 - load_reg( R_ECX, Rn );
21.684 - check_walign32( R_ECX );
21.685 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.686 + load_reg( R_EAX, Rn );
21.687 + check_walign32( R_EAX );
21.688 + MMU_TRANSLATE_WRITE( R_EAX );
21.689 + load_reg( R_EDX, 0 );
21.690 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.691 sh4_x86.tstate = TSTATE_NONE;
21.692 :}
21.693
21.694 @@ -1288,8 +1362,9 @@
21.695 if( sh4_x86.in_delay_slot ) {
21.696 SLOTILLEGAL();
21.697 } else {
21.698 - JT_rel8( EXIT_BLOCK_SIZE, nottaken );
21.699 - exit_block( disp + pc + 4, pc+2 );
21.700 + sh4vma_t target = disp + pc + 4;
21.701 + JT_rel8( EXIT_BLOCK_REL_SIZE(target), nottaken );
21.702 + exit_block_rel(target, pc+2 );
21.703 JMP_TARGET(nottaken);
21.704 return 2;
21.705 }
21.706 @@ -1298,6 +1373,7 @@
21.707 if( sh4_x86.in_delay_slot ) {
21.708 SLOTILLEGAL();
21.709 } else {
21.710 + sh4vma_t target = disp + pc + 4;
21.711 sh4_x86.in_delay_slot = TRUE;
21.712 if( sh4_x86.tstate == TSTATE_NONE ) {
21.713 CMP_imm8s_sh4r( 1, R_T );
21.714 @@ -1305,7 +1381,7 @@
21.715 }
21.716 OP(0x0F); OP(0x80+sh4_x86.tstate); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // JNE rel32
21.717 sh4_translate_instruction(pc+2);
21.718 - exit_block( disp + pc + 4, pc+4 );
21.719 + exit_block_rel( target, pc+4 );
21.720 // not taken
21.721 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
21.722 sh4_translate_instruction(pc+2);
21.723 @@ -1318,7 +1394,7 @@
21.724 } else {
21.725 sh4_x86.in_delay_slot = TRUE;
21.726 sh4_translate_instruction( pc + 2 );
21.727 - exit_block( disp + pc + 4, pc+4 );
21.728 + exit_block_rel( disp + pc + 4, pc+4 );
21.729 sh4_x86.branch_taken = TRUE;
21.730 return 4;
21.731 }
21.732 @@ -1346,7 +1422,7 @@
21.733 store_spreg( R_EAX, R_PR );
21.734 sh4_x86.in_delay_slot = TRUE;
21.735 sh4_translate_instruction( pc + 2 );
21.736 - exit_block( disp + pc + 4, pc+4 );
21.737 + exit_block_rel( disp + pc + 4, pc+4 );
21.738 sh4_x86.branch_taken = TRUE;
21.739 return 4;
21.740 }
21.741 @@ -1371,8 +1447,9 @@
21.742 if( sh4_x86.in_delay_slot ) {
21.743 SLOTILLEGAL();
21.744 } else {
21.745 - JF_rel8( EXIT_BLOCK_SIZE, nottaken );
21.746 - exit_block( disp + pc + 4, pc+2 );
21.747 + sh4vma_t target = disp + pc + 4;
21.748 + JF_rel8( EXIT_BLOCK_REL_SIZE(target), nottaken );
21.749 + exit_block_rel(target, pc+2 );
21.750 JMP_TARGET(nottaken);
21.751 return 2;
21.752 }
21.753 @@ -1388,7 +1465,7 @@
21.754 }
21.755 OP(0x0F); OP(0x80+(sh4_x86.tstate^1)); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // JE rel32
21.756 sh4_translate_instruction(pc+2);
21.757 - exit_block( disp + pc + 4, pc+4 );
21.758 + exit_block_rel( disp + pc + 4, pc+4 );
21.759 // not taken
21.760 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
21.761 sh4_translate_instruction(pc+2);
21.762 @@ -1558,191 +1635,195 @@
21.763 :}
21.764 FMOV FRm, @Rn {:
21.765 check_fpuen();
21.766 - load_reg( R_ECX, Rn );
21.767 - check_walign32( R_ECX );
21.768 + load_reg( R_EAX, Rn );
21.769 + check_walign32( R_EAX );
21.770 + MMU_TRANSLATE_WRITE( R_EAX );
21.771 load_spreg( R_EDX, R_FPSCR );
21.772 TEST_imm32_r32( FPSCR_SZ, R_EDX );
21.773 JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
21.774 load_fr_bank( R_EDX );
21.775 - load_fr( R_EDX, R_EAX, FRm );
21.776 - MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
21.777 + load_fr( R_EDX, R_ECX, FRm );
21.778 + MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
21.779 if( FRm&1 ) {
21.780 JMP_rel8( 18 + MEM_WRITE_DOUBLE_SIZE, end );
21.781 JMP_TARGET(doublesize);
21.782 load_xf_bank( R_EDX );
21.783 - load_fr( R_EDX, R_EAX, FRm&0x0E );
21.784 + load_fr( R_EDX, R_ECX, FRm&0x0E );
21.785 load_fr( R_EDX, R_EDX, FRm|0x01 );
21.786 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
21.787 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
21.788 JMP_TARGET(end);
21.789 } else {
21.790 JMP_rel8( 9 + MEM_WRITE_DOUBLE_SIZE, end );
21.791 JMP_TARGET(doublesize);
21.792 load_fr_bank( R_EDX );
21.793 - load_fr( R_EDX, R_EAX, FRm&0x0E );
21.794 + load_fr( R_EDX, R_ECX, FRm&0x0E );
21.795 load_fr( R_EDX, R_EDX, FRm|0x01 );
21.796 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
21.797 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
21.798 JMP_TARGET(end);
21.799 }
21.800 sh4_x86.tstate = TSTATE_NONE;
21.801 :}
21.802 FMOV @Rm, FRn {:
21.803 check_fpuen();
21.804 - load_reg( R_ECX, Rm );
21.805 - check_ralign32( R_ECX );
21.806 + load_reg( R_EAX, Rm );
21.807 + check_ralign32( R_EAX );
21.808 + MMU_TRANSLATE_READ( R_EAX );
21.809 load_spreg( R_EDX, R_FPSCR );
21.810 TEST_imm32_r32( FPSCR_SZ, R_EDX );
21.811 JNE_rel8(8 + MEM_READ_SIZE, doublesize);
21.812 - MEM_READ_LONG( R_ECX, R_EAX );
21.813 + MEM_READ_LONG( R_EAX, R_EAX );
21.814 load_fr_bank( R_EDX );
21.815 store_fr( R_EDX, R_EAX, FRn );
21.816 if( FRn&1 ) {
21.817 JMP_rel8(21 + MEM_READ_DOUBLE_SIZE, end);
21.818 JMP_TARGET(doublesize);
21.819 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
21.820 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
21.821 load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
21.822 load_xf_bank( R_EDX );
21.823 - store_fr( R_EDX, R_EAX, FRn&0x0E );
21.824 - store_fr( R_EDX, R_ECX, FRn|0x01 );
21.825 + store_fr( R_EDX, R_ECX, FRn&0x0E );
21.826 + store_fr( R_EDX, R_EAX, FRn|0x01 );
21.827 JMP_TARGET(end);
21.828 } else {
21.829 JMP_rel8(9 + MEM_READ_DOUBLE_SIZE, end);
21.830 JMP_TARGET(doublesize);
21.831 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
21.832 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
21.833 load_fr_bank( R_EDX );
21.834 - store_fr( R_EDX, R_EAX, FRn&0x0E );
21.835 - store_fr( R_EDX, R_ECX, FRn|0x01 );
21.836 + store_fr( R_EDX, R_ECX, FRn&0x0E );
21.837 + store_fr( R_EDX, R_EAX, FRn|0x01 );
21.838 JMP_TARGET(end);
21.839 }
21.840 sh4_x86.tstate = TSTATE_NONE;
21.841 :}
21.842 FMOV FRm, @-Rn {:
21.843 check_fpuen();
21.844 - load_reg( R_ECX, Rn );
21.845 - check_walign32( R_ECX );
21.846 + load_reg( R_EAX, Rn );
21.847 + check_walign32( R_EAX );
21.848 load_spreg( R_EDX, R_FPSCR );
21.849 TEST_imm32_r32( FPSCR_SZ, R_EDX );
21.850 - JNE_rel8(14 + MEM_WRITE_SIZE, doublesize);
21.851 + JNE_rel8(15 + MEM_WRITE_SIZE + MMU_TRANSLATE_SIZE, doublesize);
21.852 + ADD_imm8s_r32( -4, R_EAX );
21.853 + MMU_TRANSLATE_WRITE( R_EAX );
21.854 load_fr_bank( R_EDX );
21.855 - load_fr( R_EDX, R_EAX, FRm );
21.856 - ADD_imm8s_r32(-4,R_ECX);
21.857 - store_reg( R_ECX, Rn );
21.858 - MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
21.859 + load_fr( R_EDX, R_ECX, FRm );
21.860 + ADD_imm8s_sh4r(-4,REG_OFFSET(r[Rn]));
21.861 + MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
21.862 if( FRm&1 ) {
21.863 - JMP_rel8( 24 + MEM_WRITE_DOUBLE_SIZE, end );
21.864 + JMP_rel8( 25 + MEM_WRITE_DOUBLE_SIZE + MMU_TRANSLATE_SIZE, end );
21.865 JMP_TARGET(doublesize);
21.866 + ADD_imm8s_r32(-8,R_EAX);
21.867 + MMU_TRANSLATE_WRITE( R_EAX );
21.868 load_xf_bank( R_EDX );
21.869 - load_fr( R_EDX, R_EAX, FRm&0x0E );
21.870 + load_fr( R_EDX, R_ECX, FRm&0x0E );
21.871 load_fr( R_EDX, R_EDX, FRm|0x01 );
21.872 - ADD_imm8s_r32(-8,R_ECX);
21.873 - store_reg( R_ECX, Rn );
21.874 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
21.875 + ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
21.876 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
21.877 JMP_TARGET(end);
21.878 } else {
21.879 - JMP_rel8( 15 + MEM_WRITE_DOUBLE_SIZE, end );
21.880 + JMP_rel8( 16 + MEM_WRITE_DOUBLE_SIZE + MMU_TRANSLATE_SIZE, end );
21.881 JMP_TARGET(doublesize);
21.882 + ADD_imm8s_r32(-8,R_EAX);
21.883 + MMU_TRANSLATE_WRITE( R_EAX );
21.884 load_fr_bank( R_EDX );
21.885 - load_fr( R_EDX, R_EAX, FRm&0x0E );
21.886 + load_fr( R_EDX, R_ECX, FRm&0x0E );
21.887 load_fr( R_EDX, R_EDX, FRm|0x01 );
21.888 - ADD_imm8s_r32(-8,R_ECX);
21.889 - store_reg( R_ECX, Rn );
21.890 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
21.891 + ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
21.892 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
21.893 JMP_TARGET(end);
21.894 }
21.895 sh4_x86.tstate = TSTATE_NONE;
21.896 :}
21.897 FMOV @Rm+, FRn {:
21.898 check_fpuen();
21.899 - load_reg( R_ECX, Rm );
21.900 - check_ralign32( R_ECX );
21.901 - MOV_r32_r32( R_ECX, R_EAX );
21.902 + load_reg( R_EAX, Rm );
21.903 + check_ralign32( R_EAX );
21.904 + MMU_TRANSLATE_READ( R_EAX );
21.905 load_spreg( R_EDX, R_FPSCR );
21.906 TEST_imm32_r32( FPSCR_SZ, R_EDX );
21.907 - JNE_rel8(14 + MEM_READ_SIZE, doublesize);
21.908 - ADD_imm8s_r32( 4, R_EAX );
21.909 - store_reg( R_EAX, Rm );
21.910 - MEM_READ_LONG( R_ECX, R_EAX );
21.911 + JNE_rel8(12 + MEM_READ_SIZE, doublesize);
21.912 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.913 + MEM_READ_LONG( R_EAX, R_EAX );
21.914 load_fr_bank( R_EDX );
21.915 store_fr( R_EDX, R_EAX, FRn );
21.916 if( FRn&1 ) {
21.917 - JMP_rel8(27 + MEM_READ_DOUBLE_SIZE, end);
21.918 + JMP_rel8(25 + MEM_READ_DOUBLE_SIZE, end);
21.919 JMP_TARGET(doublesize);
21.920 - ADD_imm8s_r32( 8, R_EAX );
21.921 - store_reg(R_EAX, Rm);
21.922 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
21.923 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
21.924 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
21.925 load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
21.926 load_xf_bank( R_EDX );
21.927 - store_fr( R_EDX, R_EAX, FRn&0x0E );
21.928 - store_fr( R_EDX, R_ECX, FRn|0x01 );
21.929 + store_fr( R_EDX, R_ECX, FRn&0x0E );
21.930 + store_fr( R_EDX, R_EAX, FRn|0x01 );
21.931 JMP_TARGET(end);
21.932 } else {
21.933 - JMP_rel8(15 + MEM_READ_DOUBLE_SIZE, end);
21.934 - ADD_imm8s_r32( 8, R_EAX );
21.935 - store_reg(R_EAX, Rm);
21.936 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
21.937 + JMP_rel8(13 + MEM_READ_DOUBLE_SIZE, end);
21.938 + ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
21.939 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
21.940 load_fr_bank( R_EDX );
21.941 - store_fr( R_EDX, R_EAX, FRn&0x0E );
21.942 - store_fr( R_EDX, R_ECX, FRn|0x01 );
21.943 + store_fr( R_EDX, R_ECX, FRn&0x0E );
21.944 + store_fr( R_EDX, R_EAX, FRn|0x01 );
21.945 JMP_TARGET(end);
21.946 }
21.947 sh4_x86.tstate = TSTATE_NONE;
21.948 :}
21.949 FMOV FRm, @(R0, Rn) {:
21.950 check_fpuen();
21.951 - load_reg( R_ECX, Rn );
21.952 - ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
21.953 - check_walign32( R_ECX );
21.954 + load_reg( R_EAX, Rn );
21.955 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
21.956 + check_walign32( R_EAX );
21.957 + MMU_TRANSLATE_WRITE( R_EAX );
21.958 load_spreg( R_EDX, R_FPSCR );
21.959 TEST_imm32_r32( FPSCR_SZ, R_EDX );
21.960 JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
21.961 load_fr_bank( R_EDX );
21.962 - load_fr( R_EDX, R_EAX, FRm );
21.963 - MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
21.964 + load_fr( R_EDX, R_ECX, FRm );
21.965 + MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
21.966 if( FRm&1 ) {
21.967 JMP_rel8( 18 + MEM_WRITE_DOUBLE_SIZE, end );
21.968 JMP_TARGET(doublesize);
21.969 load_xf_bank( R_EDX );
21.970 - load_fr( R_EDX, R_EAX, FRm&0x0E );
21.971 + load_fr( R_EDX, R_ECX, FRm&0x0E );
21.972 load_fr( R_EDX, R_EDX, FRm|0x01 );
21.973 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
21.974 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
21.975 JMP_TARGET(end);
21.976 } else {
21.977 JMP_rel8( 9 + MEM_WRITE_DOUBLE_SIZE, end );
21.978 JMP_TARGET(doublesize);
21.979 load_fr_bank( R_EDX );
21.980 - load_fr( R_EDX, R_EAX, FRm&0x0E );
21.981 + load_fr( R_EDX, R_ECX, FRm&0x0E );
21.982 load_fr( R_EDX, R_EDX, FRm|0x01 );
21.983 - MEM_WRITE_DOUBLE( R_ECX, R_EAX, R_EDX );
21.984 + MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
21.985 JMP_TARGET(end);
21.986 }
21.987 sh4_x86.tstate = TSTATE_NONE;
21.988 :}
21.989 FMOV @(R0, Rm), FRn {:
21.990 check_fpuen();
21.991 - load_reg( R_ECX, Rm );
21.992 - ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
21.993 - check_ralign32( R_ECX );
21.994 + load_reg( R_EAX, Rm );
21.995 + ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
21.996 + check_ralign32( R_EAX );
21.997 + MMU_TRANSLATE_READ( R_EAX );
21.998 load_spreg( R_EDX, R_FPSCR );
21.999 TEST_imm32_r32( FPSCR_SZ, R_EDX );
21.1000 JNE_rel8(8 + MEM_READ_SIZE, doublesize);
21.1001 - MEM_READ_LONG( R_ECX, R_EAX );
21.1002 + MEM_READ_LONG( R_EAX, R_EAX );
21.1003 load_fr_bank( R_EDX );
21.1004 store_fr( R_EDX, R_EAX, FRn );
21.1005 if( FRn&1 ) {
21.1006 JMP_rel8(21 + MEM_READ_DOUBLE_SIZE, end);
21.1007 JMP_TARGET(doublesize);
21.1008 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
21.1009 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
21.1010 load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
21.1011 load_xf_bank( R_EDX );
21.1012 - store_fr( R_EDX, R_EAX, FRn&0x0E );
21.1013 - store_fr( R_EDX, R_ECX, FRn|0x01 );
21.1014 + store_fr( R_EDX, R_ECX, FRn&0x0E );
21.1015 + store_fr( R_EDX, R_EAX, FRn|0x01 );
21.1016 JMP_TARGET(end);
21.1017 } else {
21.1018 JMP_rel8(9 + MEM_READ_DOUBLE_SIZE, end);
21.1019 JMP_TARGET(doublesize);
21.1020 - MEM_READ_DOUBLE( R_ECX, R_EAX, R_ECX );
21.1021 + MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
21.1022 load_fr_bank( R_EDX );
21.1023 - store_fr( R_EDX, R_EAX, FRn&0x0E );
21.1024 - store_fr( R_EDX, R_ECX, FRn|0x01 );
21.1025 + store_fr( R_EDX, R_ECX, FRn&0x0E );
21.1026 + store_fr( R_EDX, R_EAX, FRn|0x01 );
21.1027 JMP_TARGET(end);
21.1028 }
21.1029 sh4_x86.tstate = TSTATE_NONE;
21.1030 @@ -2183,10 +2264,9 @@
21.1031 LDC.L @Rm+, GBR {:
21.1032 load_reg( R_EAX, Rm );
21.1033 check_ralign32( R_EAX );
21.1034 - MOV_r32_r32( R_EAX, R_ECX );
21.1035 - ADD_imm8s_r32( 4, R_EAX );
21.1036 - store_reg( R_EAX, Rm );
21.1037 - MEM_READ_LONG( R_ECX, R_EAX );
21.1038 + MMU_TRANSLATE_READ( R_EAX );
21.1039 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1040 + MEM_READ_LONG( R_EAX, R_EAX );
21.1041 store_spreg( R_EAX, R_GBR );
21.1042 sh4_x86.tstate = TSTATE_NONE;
21.1043 :}
21.1044 @@ -2197,10 +2277,9 @@
21.1045 check_priv();
21.1046 load_reg( R_EAX, Rm );
21.1047 check_ralign32( R_EAX );
21.1048 - MOV_r32_r32( R_EAX, R_ECX );
21.1049 - ADD_imm8s_r32( 4, R_EAX );
21.1050 - store_reg( R_EAX, Rm );
21.1051 - MEM_READ_LONG( R_ECX, R_EAX );
21.1052 + MMU_TRANSLATE_READ( R_EAX );
21.1053 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1054 + MEM_READ_LONG( R_EAX, R_EAX );
21.1055 call_func1( sh4_write_sr, R_EAX );
21.1056 sh4_x86.priv_checked = FALSE;
21.1057 sh4_x86.fpuen_checked = FALSE;
21.1058 @@ -2211,10 +2290,9 @@
21.1059 check_priv();
21.1060 load_reg( R_EAX, Rm );
21.1061 check_ralign32( R_EAX );
21.1062 - MOV_r32_r32( R_EAX, R_ECX );
21.1063 - ADD_imm8s_r32( 4, R_EAX );
21.1064 - store_reg( R_EAX, Rm );
21.1065 - MEM_READ_LONG( R_ECX, R_EAX );
21.1066 + MMU_TRANSLATE_READ( R_EAX );
21.1067 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1068 + MEM_READ_LONG( R_EAX, R_EAX );
21.1069 store_spreg( R_EAX, R_VBR );
21.1070 sh4_x86.tstate = TSTATE_NONE;
21.1071 :}
21.1072 @@ -2222,10 +2300,9 @@
21.1073 check_priv();
21.1074 load_reg( R_EAX, Rm );
21.1075 check_ralign32( R_EAX );
21.1076 - MOV_r32_r32( R_EAX, R_ECX );
21.1077 - ADD_imm8s_r32( 4, R_EAX );
21.1078 - store_reg( R_EAX, Rm );
21.1079 - MEM_READ_LONG( R_ECX, R_EAX );
21.1080 + MMU_TRANSLATE_READ( R_EAX );
21.1081 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1082 + MEM_READ_LONG( R_EAX, R_EAX );
21.1083 store_spreg( R_EAX, R_SSR );
21.1084 sh4_x86.tstate = TSTATE_NONE;
21.1085 :}
21.1086 @@ -2233,10 +2310,9 @@
21.1087 check_priv();
21.1088 load_reg( R_EAX, Rm );
21.1089 check_ralign32( R_EAX );
21.1090 - MOV_r32_r32( R_EAX, R_ECX );
21.1091 - ADD_imm8s_r32( 4, R_EAX );
21.1092 - store_reg( R_EAX, Rm );
21.1093 - MEM_READ_LONG( R_ECX, R_EAX );
21.1094 + MMU_TRANSLATE_READ( R_EAX );
21.1095 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1096 + MEM_READ_LONG( R_EAX, R_EAX );
21.1097 store_spreg( R_EAX, R_SGR );
21.1098 sh4_x86.tstate = TSTATE_NONE;
21.1099 :}
21.1100 @@ -2244,10 +2320,9 @@
21.1101 check_priv();
21.1102 load_reg( R_EAX, Rm );
21.1103 check_ralign32( R_EAX );
21.1104 - MOV_r32_r32( R_EAX, R_ECX );
21.1105 - ADD_imm8s_r32( 4, R_EAX );
21.1106 - store_reg( R_EAX, Rm );
21.1107 - MEM_READ_LONG( R_ECX, R_EAX );
21.1108 + MMU_TRANSLATE_READ( R_EAX );
21.1109 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1110 + MEM_READ_LONG( R_EAX, R_EAX );
21.1111 store_spreg( R_EAX, R_SPC );
21.1112 sh4_x86.tstate = TSTATE_NONE;
21.1113 :}
21.1114 @@ -2255,10 +2330,9 @@
21.1115 check_priv();
21.1116 load_reg( R_EAX, Rm );
21.1117 check_ralign32( R_EAX );
21.1118 - MOV_r32_r32( R_EAX, R_ECX );
21.1119 - ADD_imm8s_r32( 4, R_EAX );
21.1120 - store_reg( R_EAX, Rm );
21.1121 - MEM_READ_LONG( R_ECX, R_EAX );
21.1122 + MMU_TRANSLATE_READ( R_EAX );
21.1123 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1124 + MEM_READ_LONG( R_EAX, R_EAX );
21.1125 store_spreg( R_EAX, R_DBR );
21.1126 sh4_x86.tstate = TSTATE_NONE;
21.1127 :}
21.1128 @@ -2266,10 +2340,9 @@
21.1129 check_priv();
21.1130 load_reg( R_EAX, Rm );
21.1131 check_ralign32( R_EAX );
21.1132 - MOV_r32_r32( R_EAX, R_ECX );
21.1133 - ADD_imm8s_r32( 4, R_EAX );
21.1134 - store_reg( R_EAX, Rm );
21.1135 - MEM_READ_LONG( R_ECX, R_EAX );
21.1136 + MMU_TRANSLATE_READ( R_EAX );
21.1137 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1138 + MEM_READ_LONG( R_EAX, R_EAX );
21.1139 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
21.1140 sh4_x86.tstate = TSTATE_NONE;
21.1141 :}
21.1142 @@ -2282,10 +2355,9 @@
21.1143 LDS.L @Rm+, FPSCR {:
21.1144 load_reg( R_EAX, Rm );
21.1145 check_ralign32( R_EAX );
21.1146 - MOV_r32_r32( R_EAX, R_ECX );
21.1147 - ADD_imm8s_r32( 4, R_EAX );
21.1148 - store_reg( R_EAX, Rm );
21.1149 - MEM_READ_LONG( R_ECX, R_EAX );
21.1150 + MMU_TRANSLATE_READ( R_EAX );
21.1151 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1152 + MEM_READ_LONG( R_EAX, R_EAX );
21.1153 store_spreg( R_EAX, R_FPSCR );
21.1154 update_fr_bank( R_EAX );
21.1155 sh4_x86.tstate = TSTATE_NONE;
21.1156 @@ -2297,10 +2369,9 @@
21.1157 LDS.L @Rm+, FPUL {:
21.1158 load_reg( R_EAX, Rm );
21.1159 check_ralign32( R_EAX );
21.1160 - MOV_r32_r32( R_EAX, R_ECX );
21.1161 - ADD_imm8s_r32( 4, R_EAX );
21.1162 - store_reg( R_EAX, Rm );
21.1163 - MEM_READ_LONG( R_ECX, R_EAX );
21.1164 + MMU_TRANSLATE_READ( R_EAX );
21.1165 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1166 + MEM_READ_LONG( R_EAX, R_EAX );
21.1167 store_spreg( R_EAX, R_FPUL );
21.1168 sh4_x86.tstate = TSTATE_NONE;
21.1169 :}
21.1170 @@ -2311,10 +2382,9 @@
21.1171 LDS.L @Rm+, MACH {:
21.1172 load_reg( R_EAX, Rm );
21.1173 check_ralign32( R_EAX );
21.1174 - MOV_r32_r32( R_EAX, R_ECX );
21.1175 - ADD_imm8s_r32( 4, R_EAX );
21.1176 - store_reg( R_EAX, Rm );
21.1177 - MEM_READ_LONG( R_ECX, R_EAX );
21.1178 + MMU_TRANSLATE_READ( R_EAX );
21.1179 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1180 + MEM_READ_LONG( R_EAX, R_EAX );
21.1181 store_spreg( R_EAX, R_MACH );
21.1182 sh4_x86.tstate = TSTATE_NONE;
21.1183 :}
21.1184 @@ -2325,10 +2395,9 @@
21.1185 LDS.L @Rm+, MACL {:
21.1186 load_reg( R_EAX, Rm );
21.1187 check_ralign32( R_EAX );
21.1188 - MOV_r32_r32( R_EAX, R_ECX );
21.1189 - ADD_imm8s_r32( 4, R_EAX );
21.1190 - store_reg( R_EAX, Rm );
21.1191 - MEM_READ_LONG( R_ECX, R_EAX );
21.1192 + MMU_TRANSLATE_READ( R_EAX );
21.1193 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1194 + MEM_READ_LONG( R_EAX, R_EAX );
21.1195 store_spreg( R_EAX, R_MACL );
21.1196 sh4_x86.tstate = TSTATE_NONE;
21.1197 :}
21.1198 @@ -2339,10 +2408,9 @@
21.1199 LDS.L @Rm+, PR {:
21.1200 load_reg( R_EAX, Rm );
21.1201 check_ralign32( R_EAX );
21.1202 - MOV_r32_r32( R_EAX, R_ECX );
21.1203 - ADD_imm8s_r32( 4, R_EAX );
21.1204 - store_reg( R_EAX, Rm );
21.1205 - MEM_READ_LONG( R_ECX, R_EAX );
21.1206 + MMU_TRANSLATE_READ( R_EAX );
21.1207 + ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
21.1208 + MEM_READ_LONG( R_EAX, R_EAX );
21.1209 store_spreg( R_EAX, R_PR );
21.1210 sh4_x86.tstate = TSTATE_NONE;
21.1211 :}
21.1212 @@ -2417,81 +2485,91 @@
21.1213 :}
21.1214 STC.L SR, @-Rn {:
21.1215 check_priv();
21.1216 + load_reg( R_EAX, Rn );
21.1217 + check_walign32( R_EAX );
21.1218 + ADD_imm8s_r32( -4, R_EAX );
21.1219 + MMU_TRANSLATE_WRITE( R_EAX );
21.1220 + PUSH_realigned_r32( R_EAX );
21.1221 call_func0( sh4_read_sr );
21.1222 - load_reg( R_ECX, Rn );
21.1223 - check_walign32( R_ECX );
21.1224 - ADD_imm8s_r32( -4, R_ECX );
21.1225 - store_reg( R_ECX, Rn );
21.1226 + POP_realigned_r32( R_ECX );
21.1227 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1228 MEM_WRITE_LONG( R_ECX, R_EAX );
21.1229 sh4_x86.tstate = TSTATE_NONE;
21.1230 :}
21.1231 STC.L VBR, @-Rn {:
21.1232 check_priv();
21.1233 - load_reg( R_ECX, Rn );
21.1234 - check_walign32( R_ECX );
21.1235 - ADD_imm8s_r32( -4, R_ECX );
21.1236 - store_reg( R_ECX, Rn );
21.1237 - load_spreg( R_EAX, R_VBR );
21.1238 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1239 + load_reg( R_EAX, Rn );
21.1240 + check_walign32( R_EAX );
21.1241 + ADD_imm8s_r32( -4, R_EAX );
21.1242 + MMU_TRANSLATE_WRITE( R_EAX );
21.1243 + load_spreg( R_EDX, R_VBR );
21.1244 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1245 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1246 sh4_x86.tstate = TSTATE_NONE;
21.1247 :}
21.1248 STC.L SSR, @-Rn {:
21.1249 check_priv();
21.1250 - load_reg( R_ECX, Rn );
21.1251 - check_walign32( R_ECX );
21.1252 - ADD_imm8s_r32( -4, R_ECX );
21.1253 - store_reg( R_ECX, Rn );
21.1254 - load_spreg( R_EAX, R_SSR );
21.1255 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1256 + load_reg( R_EAX, Rn );
21.1257 + check_walign32( R_EAX );
21.1258 + ADD_imm8s_r32( -4, R_EAX );
21.1259 + MMU_TRANSLATE_WRITE( R_EAX );
21.1260 + load_spreg( R_EDX, R_SSR );
21.1261 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1262 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1263 sh4_x86.tstate = TSTATE_NONE;
21.1264 :}
21.1265 STC.L SPC, @-Rn {:
21.1266 check_priv();
21.1267 - load_reg( R_ECX, Rn );
21.1268 - check_walign32( R_ECX );
21.1269 - ADD_imm8s_r32( -4, R_ECX );
21.1270 - store_reg( R_ECX, Rn );
21.1271 - load_spreg( R_EAX, R_SPC );
21.1272 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1273 + load_reg( R_EAX, Rn );
21.1274 + check_walign32( R_EAX );
21.1275 + ADD_imm8s_r32( -4, R_EAX );
21.1276 + MMU_TRANSLATE_WRITE( R_EAX );
21.1277 + load_spreg( R_EDX, R_SPC );
21.1278 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1279 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1280 sh4_x86.tstate = TSTATE_NONE;
21.1281 :}
21.1282 STC.L SGR, @-Rn {:
21.1283 check_priv();
21.1284 - load_reg( R_ECX, Rn );
21.1285 - check_walign32( R_ECX );
21.1286 - ADD_imm8s_r32( -4, R_ECX );
21.1287 - store_reg( R_ECX, Rn );
21.1288 - load_spreg( R_EAX, R_SGR );
21.1289 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1290 + load_reg( R_EAX, Rn );
21.1291 + check_walign32( R_EAX );
21.1292 + ADD_imm8s_r32( -4, R_EAX );
21.1293 + MMU_TRANSLATE_WRITE( R_EAX );
21.1294 + load_spreg( R_EDX, R_SGR );
21.1295 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1296 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1297 sh4_x86.tstate = TSTATE_NONE;
21.1298 :}
21.1299 STC.L DBR, @-Rn {:
21.1300 check_priv();
21.1301 - load_reg( R_ECX, Rn );
21.1302 - check_walign32( R_ECX );
21.1303 - ADD_imm8s_r32( -4, R_ECX );
21.1304 - store_reg( R_ECX, Rn );
21.1305 - load_spreg( R_EAX, R_DBR );
21.1306 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1307 + load_reg( R_EAX, Rn );
21.1308 + check_walign32( R_EAX );
21.1309 + ADD_imm8s_r32( -4, R_EAX );
21.1310 + MMU_TRANSLATE_WRITE( R_EAX );
21.1311 + load_spreg( R_EDX, R_DBR );
21.1312 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1313 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1314 sh4_x86.tstate = TSTATE_NONE;
21.1315 :}
21.1316 STC.L Rm_BANK, @-Rn {:
21.1317 check_priv();
21.1318 - load_reg( R_ECX, Rn );
21.1319 - check_walign32( R_ECX );
21.1320 - ADD_imm8s_r32( -4, R_ECX );
21.1321 - store_reg( R_ECX, Rn );
21.1322 - load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
21.1323 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1324 + load_reg( R_EAX, Rn );
21.1325 + check_walign32( R_EAX );
21.1326 + ADD_imm8s_r32( -4, R_EAX );
21.1327 + MMU_TRANSLATE_WRITE( R_EAX );
21.1328 + load_spreg( R_EDX, REG_OFFSET(r_bank[Rm_BANK]) );
21.1329 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1330 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1331 sh4_x86.tstate = TSTATE_NONE;
21.1332 :}
21.1333 STC.L GBR, @-Rn {:
21.1334 - load_reg( R_ECX, Rn );
21.1335 - check_walign32( R_ECX );
21.1336 - ADD_imm8s_r32( -4, R_ECX );
21.1337 - store_reg( R_ECX, Rn );
21.1338 - load_spreg( R_EAX, R_GBR );
21.1339 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1340 + load_reg( R_EAX, Rn );
21.1341 + check_walign32( R_EAX );
21.1342 + ADD_imm8s_r32( -4, R_EAX );
21.1343 + MMU_TRANSLATE_WRITE( R_EAX );
21.1344 + load_spreg( R_EDX, R_GBR );
21.1345 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1346 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1347 sh4_x86.tstate = TSTATE_NONE;
21.1348 :}
21.1349 STS FPSCR, Rn {:
21.1350 @@ -2499,12 +2577,13 @@
21.1351 store_reg( R_EAX, Rn );
21.1352 :}
21.1353 STS.L FPSCR, @-Rn {:
21.1354 - load_reg( R_ECX, Rn );
21.1355 - check_walign32( R_ECX );
21.1356 - ADD_imm8s_r32( -4, R_ECX );
21.1357 - store_reg( R_ECX, Rn );
21.1358 - load_spreg( R_EAX, R_FPSCR );
21.1359 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1360 + load_reg( R_EAX, Rn );
21.1361 + check_walign32( R_EAX );
21.1362 + ADD_imm8s_r32( -4, R_EAX );
21.1363 + MMU_TRANSLATE_WRITE( R_EAX );
21.1364 + load_spreg( R_EDX, R_FPSCR );
21.1365 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1366 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1367 sh4_x86.tstate = TSTATE_NONE;
21.1368 :}
21.1369 STS FPUL, Rn {:
21.1370 @@ -2512,12 +2591,13 @@
21.1371 store_reg( R_EAX, Rn );
21.1372 :}
21.1373 STS.L FPUL, @-Rn {:
21.1374 - load_reg( R_ECX, Rn );
21.1375 - check_walign32( R_ECX );
21.1376 - ADD_imm8s_r32( -4, R_ECX );
21.1377 - store_reg( R_ECX, Rn );
21.1378 - load_spreg( R_EAX, R_FPUL );
21.1379 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1380 + load_reg( R_EAX, Rn );
21.1381 + check_walign32( R_EAX );
21.1382 + ADD_imm8s_r32( -4, R_EAX );
21.1383 + MMU_TRANSLATE_WRITE( R_EAX );
21.1384 + load_spreg( R_EDX, R_FPUL );
21.1385 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1386 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1387 sh4_x86.tstate = TSTATE_NONE;
21.1388 :}
21.1389 STS MACH, Rn {:
21.1390 @@ -2525,12 +2605,13 @@
21.1391 store_reg( R_EAX, Rn );
21.1392 :}
21.1393 STS.L MACH, @-Rn {:
21.1394 - load_reg( R_ECX, Rn );
21.1395 - check_walign32( R_ECX );
21.1396 - ADD_imm8s_r32( -4, R_ECX );
21.1397 - store_reg( R_ECX, Rn );
21.1398 - load_spreg( R_EAX, R_MACH );
21.1399 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1400 + load_reg( R_EAX, Rn );
21.1401 + check_walign32( R_EAX );
21.1402 + ADD_imm8s_r32( -4, R_EAX );
21.1403 + MMU_TRANSLATE_WRITE( R_EAX );
21.1404 + load_spreg( R_EDX, R_MACH );
21.1405 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1406 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1407 sh4_x86.tstate = TSTATE_NONE;
21.1408 :}
21.1409 STS MACL, Rn {:
21.1410 @@ -2538,12 +2619,13 @@
21.1411 store_reg( R_EAX, Rn );
21.1412 :}
21.1413 STS.L MACL, @-Rn {:
21.1414 - load_reg( R_ECX, Rn );
21.1415 - check_walign32( R_ECX );
21.1416 - ADD_imm8s_r32( -4, R_ECX );
21.1417 - store_reg( R_ECX, Rn );
21.1418 - load_spreg( R_EAX, R_MACL );
21.1419 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1420 + load_reg( R_EAX, Rn );
21.1421 + check_walign32( R_EAX );
21.1422 + ADD_imm8s_r32( -4, R_EAX );
21.1423 + MMU_TRANSLATE_WRITE( R_EAX );
21.1424 + load_spreg( R_EDX, R_MACL );
21.1425 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1426 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1427 sh4_x86.tstate = TSTATE_NONE;
21.1428 :}
21.1429 STS PR, Rn {:
21.1430 @@ -2551,12 +2633,13 @@
21.1431 store_reg( R_EAX, Rn );
21.1432 :}
21.1433 STS.L PR, @-Rn {:
21.1434 - load_reg( R_ECX, Rn );
21.1435 - check_walign32( R_ECX );
21.1436 - ADD_imm8s_r32( -4, R_ECX );
21.1437 - store_reg( R_ECX, Rn );
21.1438 - load_spreg( R_EAX, R_PR );
21.1439 - MEM_WRITE_LONG( R_ECX, R_EAX );
21.1440 + load_reg( R_EAX, Rn );
21.1441 + check_walign32( R_EAX );
21.1442 + ADD_imm8s_r32( -4, R_EAX );
21.1443 + MMU_TRANSLATE_WRITE( R_EAX );
21.1444 + load_spreg( R_EDX, R_PR );
21.1445 + ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
21.1446 + MEM_WRITE_LONG( R_EAX, R_EDX );
21.1447 sh4_x86.tstate = TSTATE_NONE;
21.1448 :}
21.1449
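[Editor's note] Bridging the two halves of the changeset: sh4_x86_add_recovery() above emits one (native pc, instruction index) pair per non-delay-slot instruction, and the xltcache changes below store and search that table. Applying a record is then simple arithmetic; the helper here is hypothetical, while sh4_icount, sh4r.pc and slice_cycles are names from this changeset.

    static void sketch_apply_recovery( xlat_recovery_record_t rec,
                                       sh4addr_t block_start_pc )
    {
        /* sh4_icount counts 16-bit instructions from the start of the block */
        sh4r.pc = block_start_pc + (rec->sh4_icount << 1);
        /* sh4r.slice_cycles would be advanced by the same instruction count */
    }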
22.1 --- a/src/sh4/xltcache.c Sun Jan 06 12:24:18 2008 +0000
22.2 +++ b/src/sh4/xltcache.c Thu Jan 10 08:28:37 2008 +0000
22.3 @@ -208,27 +208,33 @@
22.4 return result;
22.5 }
22.6
22.7 -void *xlat_get_code_by_vma( sh4vma_t vma )
22.8 +xlat_recovery_record_t xlat_get_recovery( void *code, void *native_pc, gboolean recover_after )
22.9 {
22.10 - void *result = NULL;
22.11 -
22.12 -
22.13 - if( !IS_IN_ICACHE(vma) ) {
22.14 - if( !mmu_update_icache(sh4r.pc) ) {
22.15 - // fault - off to the fault handler
22.16 - if( !mmu_update_icache(sh4r.pc) ) {
22.17 - // double fault - halt
22.18 - dreamcast_stop();
22.19 - ERROR( "Double fault - halting" );
22.20 + if( code != NULL ) {
22.21 + xlat_cache_block_t block = BLOCK_FOR_CODE(code);
22.22 + uint32_t count = block->recover_table_size;
22.23 + xlat_recovery_record_t records = block->recover_table;
22.24 + uint32_t posn;
22.25 + if( recover_after ) {
22.26 + if( records[count-1].xlat_pc <= (uintptr_t)native_pc ) {
22.27 return NULL;
22.28 }
22.29 + for( posn=count-1; posn > 0; posn-- ) {
22.30 + if( records[posn-1].xlat_pc <= (uintptr_t)native_pc ) {
22.31 + return &records[posn];
22.32 + }
22.33 + }
22.34 + return &records[0]; // shouldn't happen
22.35 + } else {
22.36 + for( posn = 1; posn < count; posn++ ) {
22.37 + if( records[posn].xlat_pc > (uintptr_t)native_pc ) {
22.38 + return &records[posn-1];
22.39 + }
22.40 + }
22.41 + return &records[count-1];
22.42 }
22.43 }
22.44 - if( sh4_icache.page_vma != -1 ) {
22.45 - result = xlat_get_code( GET_ICACHE_PHYS(vma) );
22.46 - }
22.47 -
22.48 - return result;
22.49 + return NULL;
22.50 }
22.51
22.52 void **xlat_get_lut_entry( sh4addr_t address )
22.53 @@ -254,6 +260,16 @@
22.54 return xlt->size;
22.55 }
22.56
22.57 +uint32_t xlat_get_code_size( void *block )
22.58 +{
22.59 + xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
22.60 + if( xlt->recover_table == NULL ) {
22.61 + return xlt->size;
22.62 + } else {
22.63 + return ((uint8_t *)xlt->recover_table) - ((uint8_t *)block);
22.64 + }
22.65 +}
22.66 +
22.67 /**
22.68 * Cut the specified block so that it has the given size, with the remaining data
22.69 * forming a new free block. If the free block would be less than the minimum size,
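[Editor's note] Combined with xlat_get_native_pc(), declared in xltcache.h below, a fault handler can map a host pc back to the guest instruction that was executing. A hypothetical caller might look like:

    static uint32_t sketch_faulting_icount( void *code )
    {
        void *native = xlat_get_native_pc();  /* host pc within the block, or NULL */
        if( code == NULL || native == NULL )
            return 0;
        xlat_recovery_record_t rec = xlat_get_recovery( code, native, FALSE );
        return rec == NULL ? 0 : rec->sh4_icount;
    }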
23.1 --- a/src/sh4/xltcache.h Sun Jan 06 12:24:18 2008 +0000
23.2 +++ b/src/sh4/xltcache.h Thu Jan 10 08:28:37 2008 +0000
23.3 @@ -19,10 +19,30 @@
23.4 #include "dream.h"
23.5 #include "mem.h"
23.6
23.7 +#ifndef lxdream_xltcache_H
23.8 +#define lxdream_xltcache_H
23.9 +
23.10 +/**
23.11 + * For now, recovery is purely a matter of mapping native pc => sh4 pc,
23.12 + * and updating sh4r.pc & sh4r.slice_cycles accordingly. In future more
23.13 + * detailed recovery may be required if the translator optimizes more
23.14 + * agressively.
23.15 + *
23.16 + * The recovery table contains (at least) one entry per abortable instruction,
23.17 + *
23.18 + */
23.19 +typedef struct xlat_recovery_record {
23.20 + uintptr_t xlat_pc; // native (translated) pc
23.21 + uint32_t sh4_icount; // instruction number of the corresponding SH4 instruction
23.22 + // (0 = first instruction, 1 = second instruction, ... )
23.23 +} *xlat_recovery_record_t;
23.24 +
23.25 typedef struct xlat_cache_block {
23.26 int active; /* 0 = deleted, 1 = normal. 2 = accessed (temp-space only) */
23.27 uint32_t size;
23.28 void **lut_entry; /* For deletion */
23.29 + xlat_recovery_record_t recover_table;
23.30 + uint32_t recover_table_size;
23.31 unsigned char code[0];
23.32 } *xlat_cache_block_t;
23.33
23.34 @@ -74,6 +94,17 @@
23.35 void *xlat_get_code( sh4addr_t address );
23.36
23.37 /**
23.38 + * Retrieve the recovery record corresponding to the given
23.39 + * native address, or NULL if there is no recovery record for the address.
23.40 + * @param code The code block containing the recovery table.
23.41 + * @param native_pc A pointer that must be within the currently executing
23.42 + *        translation block.
23.43 + * @param recover_after If TRUE, return the first record after the given pc, otherwise
23.44 + *        return the first record before or equal to the given pc.
23.45 + */
23.46 +struct xlat_recovery_record *xlat_get_recovery( void *code, void *native_pc, gboolean recover_after );
23.47 +
23.48 +/**
23.49 * Retrieve the entry point for the translated code corresponding to the given
23.50 * SH4 virtual address, or NULL if there is no code for the address.
23.51 * If the virtual address cannot be resolved, this method will raise a TLB miss
23.52 @@ -88,12 +119,28 @@
23.53 void **xlat_get_lut_entry( sh4addr_t address );
23.54
23.55 /**
23.56 - * Retrieve the size of the code block starting at the specified pointer. If the
23.57 + * Retrieve the current host address of the running translated code block.
23.58 + * @return the host PC, or NULL if there is no currently executing translated
23.59 + * block (or the stack is corrupted).
23.60 + * Note: this method is implemented in host-specific asm.
23.61 + */
23.62 +void *xlat_get_native_pc();
23.63 +
23.64 +/**
23.65 + * Retrieve the size of the block starting at the specified pointer. If the
23.66 * pointer is not a valid code block, the return value is undefined.
23.67 */
23.68 uint32_t xlat_get_block_size( void *ptr );
23.69
23.70 /**
23.71 + * Retrieve the size of the code in the block starting at the specified
23.72 + * pointer. Effectively this is xlat_get_block_size() minus the size of
23.73 + * the recovery table. If the pointer is not a valid code block, the
23.74 + * return value is undefined.
23.75 + */
23.76 +uint32_t xlat_get_code_size( void *ptr );
23.77 +
23.78 +/**
23.79 * Flush the code cache for the page containing the given address
23.80 */
23.81 void xlat_flush_page( sh4addr_t address );
23.82 @@ -116,3 +163,5 @@
23.83 * Check the internal integrity of the cache
23.84 */
23.85 void xlat_check_integrity();
23.86 +
23.87 +#endif /* lxdream_xltcache_H */
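[Editor's note] For reference, xlat_get_code_size() in the xltcache.c hunk above implies the following block layout: the recovery table lives in the same cache block, directly after the generated code, so the code-only size falls out of pointer arithmetic (illustration only):

    /*  [ struct xlat_cache_block | generated code ...... | recovery table ]
     *                            ^ block                 ^ recover_table
     *
     *  code size = (uint8_t *)recover_table - (uint8_t *)block   (table present)
     *            = xlt->size                                     (no table)
     */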
24.1 --- a/src/test/testsh4x86.c Sun Jan 06 12:24:18 2008 +0000
24.2 +++ b/src/test/testsh4x86.c Thu Jan 10 08:28:37 2008 +0000
24.3 @@ -64,8 +64,8 @@
24.4 // Stubs
24.5 gboolean sh4_execute_instruction( ) { }
24.6 void sh4_accept_interrupt() {}
24.7 -void sh4_set_breakpoint( uint32_t pc, int type ) { }
24.8 -gboolean sh4_clear_breakpoint( uint32_t pc, int type ) { }
24.9 +void sh4_set_breakpoint( uint32_t pc, breakpoint_type_t type ) { }
24.10 +gboolean sh4_clear_breakpoint( uint32_t pc, breakpoint_type_t type ) { }
24.11 int sh4_get_breakpoint( uint32_t pc ) { }
24.12 void event_execute() {}
24.13 void TMU_run_slice( uint32_t nanos ) {}
24.14 @@ -77,6 +77,9 @@
24.15 void sh4_flush_store_queue( uint32_t addr ) {}
24.16 void sh4_write_sr( uint32_t val ) { }
24.17 void syscall_invoke( uint32_t val ) { }
24.18 +void dreamcast_stop() {}
24.19 +sh4addr_t mmu_vma_to_phys_read( sh4vma_t vma ) { return vma & 0x1FFFFFFF; }
24.20 +sh4addr_t mmu_vma_to_phys_write( sh4vma_t vma ) { return vma & 0x1FFFFFFF; }
24.21 uint32_t sh4_read_sr( void ) { }
24.22 gboolean sh4_raise_exception( int exc ) {}
24.23 gboolean sh4_raise_trap( int exc ) {}
24.24 @@ -84,10 +87,8 @@
24.25 void sh4_fsca( uint32_t angle, float *fr ) { }
24.26 void sh4_ftrv( float *fv, float *xmtrx ) { }
24.27 void signsat48(void) { }
24.28 -uint16_t *sh4_icache = NULL;
24.29 -uint32_t sh4_icache_addr = 0;
24.30 gboolean gui_error_dialog( const char *fmt, ... ) { }
24.31 -
24.32 +struct sh4_icache_struct sh4_icache;
24.33
24.34 void usage()
24.35 {
25.1 --- a/src/x86dasm/x86dasm.c Sun Jan 06 12:24:18 2008 +0000
25.2 +++ b/src/x86dasm/x86dasm.c Thu Jan 10 08:28:37 2008 +0000
25.3 @@ -43,7 +43,7 @@
25.4
25.5 void xlat_disasm_block( FILE *out, void *block )
25.6 {
25.7 - uint32_t buflen = xlat_get_block_size(block);
25.8 + uint32_t buflen = xlat_get_code_size(block);
25.9 x86_set_symtab( NULL, 0 );
25.10 x86_disasm_block( out, block, buflen );
25.11 }