Remove mmx

MrDave
2019-09-27 21:53:35 -06:00
committed by Mr-Dave
parent 96a8fbd285
commit 4142fed9ff
2 changed files with 0 additions and 489 deletions

src/alg.c (222 deletions)

@@ -9,11 +9,6 @@
#include "motion.h"
#include "alg.h"
#ifdef __MMX__
#define HAVE_MMX
#include "mmx.h"
#endif
#define MAX2(x, y) ((x) > (y) ? (x) : (y))
#define MAX3(x, y, z) ((x) > (y) ? ((x) > (z) ? (x) : (z)) : ((y) > (z) ? (y) : (z)))
@@ -943,228 +938,11 @@ int alg_diff_standard(struct context *cnt, unsigned char *new)
unsigned char *mask = imgs->mask;
unsigned char *smartmask_final = imgs->smartmask_final;
int *smartmask_buffer = imgs->smartmask_buffer;
#ifdef HAVE_MMX
mmx_t mmtemp; /* Used for transferring to/from memory. */
int unload; /* Counter for unloading diff counts. */
#endif
i = imgs->motionsize;
memset(out + i, 128, i / 2); /* Motion pictures are now b/w instead of green */
/*
* Keeping this memset in the MMX case when zeroes are necessarily
* written anyway seems to be beneficial in terms of speed. Perhaps a
* cache thing?
*/
memset(out, 0, i);
#ifdef HAVE_MMX
/*
* NOTE: The Pentium has two instruction pipes: U and V. I have grouped MMX
* instructions in pairs according to how I think they will be scheduled in
* the U and V pipes. Due to pairing constraints, the V pipe will sometimes
* be empty (for example, memory access always goes into the U pipe).
*
* The following MMX registers are kept throughout the loop:
* mm5 - 8 separate diff counters (unloaded periodically)
* mm6 - mask: 00ff 00ff 00ff 00ff
* mm7 - noise level as 8 packed bytes
*
* -- Per Jonsson
*/
/*
* To avoid a div, we work with differences multiplied by 255 in the
* default case, or by *mask otherwise. Thus, the limit to compare
* against is 255 * (noise + 1) - 1.
*/
mmtemp.uw[0] = mmtemp.uw[1] = mmtemp.uw[2] = mmtemp.uw[3] =
(unsigned short)(noise * 255 + 254);
/*
* Reset mm5 to zero, set the mm6 mask, and store the multiplied noise
* level as four words in mm7.
*/
movq_m2r(mmtemp, mm7); /* U */
pcmpeqb_r2r(mm6, mm6); /* V */
pxor_r2r(mm5, mm5); /* U */
psrlw_i2r(8, mm6); /* V */
/*
* We must unload mm5 every 255th round, because the diffs accumulate
* in each packed byte, which can hold at most 255 diffs before it
* gets saturated.
*/
unload = 255;
for (; i > 7; i -= 8) {
/* Calculate abs(*ref-*new) for 8 pixels in parallel. */
movq_m2r(*ref, mm0); /* U: mm0 = r7 r6 r5 r4 r3 r2 r1 r0 */
pxor_r2r(mm4, mm4); /* V: mm4 = 0 */
movq_m2r(*new, mm1); /* U: mm1 = n7 n6 n5 n4 n3 n2 n1 n0 */
movq_r2r(mm0, mm2); /* V: mm2 = r7 r6 r5 r4 r3 r2 r1 r0 */
/* These subtractions are saturated, i.e. won't go below 0. */
psubusb_r2r(mm1, mm0); /* U: mm0 = (r7-n7) ... (r0-n0) */
psubusb_r2r(mm2, mm1); /* V: mm1 = (n7-r7) ... (n0-r0) */
/* Each byte dX in mm0 is abs(nX-rX). */
por_r2r(mm1, mm0); /* U: mm0 = d7 d6 d5 d4 d3 d2 d1 d0 */
/* Expand the absolute differences to words in mm0 and mm1. */
movq_r2r(mm0, mm1); /* U: mm1 = d7 d6 d5 d4 d3 d2 d1 d0 */
punpcklbw_r2r(mm4, mm0); /* V: mm0 = d3 d2 d1 d0 */
punpckhbw_r2r(mm4, mm1); /* U: mm1 = d7 d6 d5 d4 */
if (mask) {
/*
* Load and expand 8 mask bytes to words in mm2 and mm3. Then
* multiply by mm0 and mm1, respectively.
*/
movq_m2r(*mask, mm2); /* U: mm2 = m7 m6 m5 m4 m3 m2 m1 m0 */
movq_r2r(mm2, mm3); /* U: mm3 = m7 m6 m5 m4 m3 m2 m1 m0 */
punpcklbw_r2r(mm4, mm2); /* V: mm2 = m3 m2 m1 m0 */
punpckhbw_r2r(mm4, mm3); /* U: mm3 = m7 m6 m5 m4 */
pmullw_r2r(mm2, mm0); /* V: mm0 = (d3*m3) ... (d0*m0) */
pmullw_r2r(mm3, mm1); /* U: mm1 = (d7*m7) ... (d4*m4) */
mask += 8;
} else {
/*
* Not using mask - multiply the absolute differences by 255. We
* do this by left-shifting 8 places and then subtracting dX.
*/
movq_r2r(mm0, mm2); /* U: mm2 = d3 d2 d1 d0 */
psllw_i2r(8, mm0); /* V: mm0 = (256*d3) ... (256*d0) */
movq_r2r(mm1, mm3); /* U: mm3 = d7 d6 d5 d4 */
psllw_i2r(8, mm1); /* V: mm1 = (256*d7) ... (256*d4) */
psubusw_r2r(mm2, mm0); /* U: mm0 = (255*d3) ... (255*d0) */
psubusw_r2r(mm3, mm1); /* V: mm1 = (255*d7) ... (255*d4) */
}
/*
* Next, compare the multiplied absolute differences with the multiplied
* noise level (repeated as 4 words in mm7), resulting in a "motion flag"
* for each pixel.
*
* Since pcmpgtw performs signed comparisons, we have to subtract noise,
* test for equality to 0 and then invert the result.
*
* Note that it is safe to generate the "motion flags" before the
* smartmask code, as all that can happen is that individual flags get
* reset to 0 because of the smartmask.
*/
psubusw_r2r(mm7, mm0); /* U: subtract the (multiplied) noise level */
psubusw_r2r(mm7, mm1); /* V */
pcmpeqw_r2r(mm4, mm0); /* U: test for equality with 0 */
pcmpeqw_r2r(mm4, mm1); /* V */
pand_r2r(mm6, mm0); /* U: convert 0xffff -> 0x00ff */
pand_r2r(mm6, mm1); /* V */
pxor_r2r(mm6, mm0); /* U: invert the result */
pxor_r2r(mm6, mm1); /* V */
/* Each fX is the "motion flag" = 0 for no motion, 0xff for motion. */
packuswb_r2r(mm1, mm0); /* U: mm0 = f7 f6 f5 f4 f3 f2 f1 f0 */
if (smartmask_speed) {
/*
* Apply the smartmask. Basically, if *smartmask_final is 0, the
* corresponding "motion flag" in mm0 will be reset.
*/
movq_m2r(*smartmask_final, mm3); /* U: mm3 = s7 s6 s5 s4 s3 s2 s1 s0 */
/*
* ...but move the "motion flags" to memory first, so that
* *smartmask_buffer can be incremented properly below.
*/
movq_r2m(mm0, mmtemp); /* U */
pcmpeqb_r2r(mm4, mm3); /* V: mm3 = 0xff where sX==0 */
/* pandn negates the destination before ANDing. */
pandn_r2r(mm0, mm3); /* U: mm3 = 0xff where dX>noise && sX>0 */
movq_r2r(mm3, mm0); /* U */
/* Add to *smartmask_buffer. This is probably the fastest way to do it. */
if (cnt->event_nr != cnt->prev_event) {
if (mmtemp.ub[0]) smartmask_buffer[0] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[1]) smartmask_buffer[1] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[2]) smartmask_buffer[2] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[3]) smartmask_buffer[3] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[4]) smartmask_buffer[4] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[5]) smartmask_buffer[5] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[6]) smartmask_buffer[6] += SMARTMASK_SENSITIVITY_INCR;
if (mmtemp.ub[7]) smartmask_buffer[7] += SMARTMASK_SENSITIVITY_INCR;
}
smartmask_buffer += 8;
smartmask_final += 8;
}
movq_m2r(*new, mm2); /* U: mm2 = n7 n6 n5 n4 n3 n2 n1 n0 */
/*
* Cancel out pixels in *new according to the "motion flags" in mm0.
* Each NX is either 0 or nX as from *new.
*/
pand_r2r(mm0, mm2); /* U: mm2 = N7 N6 N5 N4 N3 N2 N1 N0 */
psubb_r2r(mm0, mm4); /* V: mm4 = 0x01 where dX>noise */
/*
* mm5 holds 8 separate counts - each one is increased according to
* the contents of mm4 (where each byte is either 0x00 or 0x01).
*/
movq_r2m(mm2, *out); /* U: this will stall */
paddusb_r2r(mm4, mm5); /* V: add counts to mm5 */
/*
* Every 255th turn, we need to unload mm5 into the diffs variable,
* because otherwise the packed bytes will get saturated.
*/
if (--unload == 0) {
/* Unload mm5 to memory and reset it. */
movq_r2m(mm5, mmtemp); /* U */
pxor_r2r(mm5, mm5); /* V: mm5 = 0 */
diffs += mmtemp.ub[0] + mmtemp.ub[1] + mmtemp.ub[2] + mmtemp.ub[3] +
mmtemp.ub[4] + mmtemp.ub[5] + mmtemp.ub[6] + mmtemp.ub[7];
unload = 255;
}
out += 8;
ref += 8;
new += 8;
}
/*
* Check if there are diffs left in mm5 that need to be copied to the
* diffs variable.
*/
if (unload < 255) {
movq_r2m(mm5, mmtemp);
diffs += mmtemp.ub[0] + mmtemp.ub[1] + mmtemp.ub[2] + mmtemp.ub[3] +
mmtemp.ub[4] + mmtemp.ub[5] + mmtemp.ub[6] + mmtemp.ub[7];
}
emms();
#endif
/*
* Note that the non-MMX code below is kept even when the MMX code is
* compiled in: if the resolution is not a multiple of 8, it takes care
* of the remaining pixels.
*/
for (; i > 0; i--) {
register unsigned char curdiff = (int)(abs(*ref - *new)); /* Using a temp variable is 12% faster. */
/* Apply fixed mask */
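
For reference, this scalar loop does the same per-pixel work that the removed MMX block did eight pixels at a time. A minimal sketch of that logic, with the fixed-mask and smartmask handling omitted (variable names follow the hunk above; the function wrapper and the no-mask assumption are only for illustration and are not code from this commit):

#include <stdlib.h> /* abs() */

/*
 * Illustrative sketch only: the scalar equivalent of the removed MMX
 * loop, ignoring the fixed mask and smartmask that the real
 * alg_diff_standard() also applies. 'out' is assumed pre-zeroed, as in
 * the memset above.
 */
static int scalar_diff(const unsigned char *ref, const unsigned char *new,
                       unsigned char *out, int n, int noise)
{
    int diffs = 0;
    for (; n > 0; n--) {
        int curdiff = abs(*ref - *new);   /* per-pixel absolute difference */
        if (curdiff > noise) {
            *out = *new;                  /* keep the changed pixel in the motion image */
            diffs++;                      /* count it towards the motion threshold */
        }
        out++;
        ref++;
        new++;
    }
    return diffs;
}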

src/mmx.h (267 deletions)

@@ -1,267 +0,0 @@
/*
* mmx.h
* Copyright (C) 1997-2001 H. Dietz and R. Fisher
*/
#ifndef I386MMX_H
#define I386MMX_H
/*
* The type of a value that fits in an MMX register (note that long
* long constant values MUST be suffixed by LL and unsigned long long
* values by ULL, lest they be truncated by the compiler)
*/
typedef union {
long long q; /* Quadword (64-bit) value */
unsigned long long uq; /* Unsigned Quadword */
int d[2]; /* 2 Doubleword (32-bit) values */
unsigned int ud[2]; /* 2 Unsigned Doubleword */
short w[4]; /* 4 Word (16-bit) values */
unsigned short uw[4]; /* 4 Unsigned Word */
char b[8]; /* 8 Byte (8-bit) values */
unsigned char ub[8]; /* 8 Unsigned Byte */
float s[2]; /* 2 Single-precision (32-bit) values */
} mmx_t; /* On an 8-byte (64-bit) boundary */
#define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
: "i" (imm))
#define mmx_m2r(op,mem,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
: "m" (mem))
#define mmx_r2m(op,reg,mem) \
__asm__ __volatile__ (#op " %%" #reg ", %0" \
: "=m" (mem) \
: /* nothing */ )
#define mmx_r2r(op,regs,regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd)
#define emms() __asm__ __volatile__ ("emms")
#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
#define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd)
#define movq_m2r(var,reg) mmx_m2r (movq, var, reg)
#define movq_r2m(reg,var) mmx_r2m (movq, reg, var)
#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd)
#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg)
#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd)
#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg)
#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd)
#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg)
#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd)
#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg)
#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd)
#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg)
#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd)
#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg)
#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd)
#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg)
#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd)
#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg)
#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd)
#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg)
#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd)
#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg)
#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd)
#define pand_m2r(var,reg) mmx_m2r (pand, var, reg)
#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd)
#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg)
#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd)
#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg)
#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd)
#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg)
#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd)
#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg)
#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd)
#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg)
#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd)
#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg)
#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd)
#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg)
#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd)
#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg)
#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd)
#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg)
#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd)
#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg)
#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd)
#define por_m2r(var,reg) mmx_m2r (por, var, reg)
#define por_r2r(regs,regd) mmx_r2r (por, regs, regd)
#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg)
#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg)
#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd)
#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg)
#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg)
#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd)
#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg)
#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg)
#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd)
#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg)
#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg)
#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd)
#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg)
#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg)
#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd)
#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg)
#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg)
#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd)
#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg)
#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg)
#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd)
#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg)
#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg)
#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd)
#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg)
#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd)
#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg)
#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd)
#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg)
#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd)
#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg)
#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd)
#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg)
#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd)
#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg)
#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd)
#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg)
#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd)
#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg)
#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd)
#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg)
#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd)
#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg)
#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd)
#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg)
#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd)
#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg)
#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd)
#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg)
#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd)
#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
/* 3DNOW extensions */
#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
/* AMD MMX extensions - also available in Intel SSE */
#define mmx_m2ri(op,mem,reg,imm) \
__asm__ __volatile__ (#op " %1, %0, %%" #reg \
: /* nothing */ \
: "X" (mem), "X" (imm))
#define mmx_r2ri(op,regs,regd,imm) \
__asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
: /* nothing */ \
: "X" (imm))
#define mmx_fetch(mem,hint) \
__asm__ __volatile__ ("prefetch" #hint " %0" \
: /* nothing */ \
: "X" (mem))
#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg)
#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd)
#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg)
#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd)
#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm)
#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm)
#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg)
#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd)
#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg)
#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd)
#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg)
#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd)
#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg)
#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)
#define pmovmskb(mmreg,reg) \
__asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg)
#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)
#define prefetcht0(mem) mmx_fetch (mem, t0)
#define prefetcht1(mem) mmx_fetch (mem, t1)
#define prefetcht2(mem) mmx_fetch (mem, t2)
#define prefetchnta(mem) mmx_fetch (mem, nta)
#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg)
#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd)
#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
#define sfence() __asm__ __volatile__ ("sfence\n\t")
/* SSE2 */
#define pshufhw_m2r(var,reg,imm) mmx_m2ri(pshufhw, var, reg, imm)
#define pshufhw_r2r(regs,regd,imm) mmx_r2ri(pshufhw, regs, regd, imm)
#define pshuflw_m2r(var,reg,imm) mmx_m2ri(pshuflw, var, reg, imm)
#define pshuflw_r2r(regs,regd,imm) mmx_r2ri(pshuflw, regs, regd, imm)
#define pshufd_r2r(regs,regd,imm) mmx_r2ri(pshufd, regs, regd, imm)
#define movdqa_m2r(var,reg) mmx_m2r (movdqa, var, reg)
#define movdqa_r2m(reg,var) mmx_r2m (movdqa, reg, var)
#define movdqa_r2r(regs,regd) mmx_r2r (movdqa, regs, regd)
#define movdqu_m2r(var,reg) mmx_m2r (movdqu, var, reg)
#define movdqu_r2m(reg,var) mmx_r2m (movdqu, reg, var)
#define movdqu_r2r(regs,regd) mmx_r2r (movdqu, regs, regd)
#define pmullw_r2m(reg,var) mmx_r2m (pmullw, reg, var)
#define pslldq_i2r(imm,reg) mmx_i2r (pslldq, imm, reg)
#define psrldq_i2r(imm,reg) mmx_i2r (psrldq, imm, reg)
#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)
#endif /* I386MMX_H */
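
For context on what these wrappers expand to: movq_m2r(*ref, mm0), for example, becomes __asm__ __volatile__ ("movq %0, %%mm0" : : "m" (*ref)) via mmx_m2r. The same eight-byte absolute-difference step used in alg.c can also be written with compiler intrinsics from <mmintrin.h>; the sketch below is shown only for comparison and is not part of this commit:

#include <mmintrin.h> /* MMX intrinsics; build with -mmmx on GCC/Clang */

/*
 * Illustrative only: |ref - new| for 8 bytes at once, mirroring the
 * psubusb/por sequence in the removed alg.c hunk.
 */
static inline __m64 absdiff8(__m64 r, __m64 n)
{
    __m64 a = _mm_subs_pu8(r, n); /* saturating r - n, like psubusb_r2r(mm1, mm0) */
    __m64 b = _mm_subs_pu8(n, r); /* saturating n - r, like psubusb_r2r(mm2, mm1) */
    return _mm_or_si64(a, b);     /* like por_r2r(mm1, mm0): per-byte |r - n| */
}
/* As with the emms() macro above, call _mm_empty() before mixing with x87 code. */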